author    Steven Whitehouse <swhiteho@redhat.com>  2006-10-02 08:45:08 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-10-02 08:45:08 -0400
commit    59458f40e25915a355d8b1d701425fe9f4f9ea23 (patch)
tree      f1c9a2934df686e36d75f759ab7313b6f0e0e5f9 /arch/s390
parent    825f9075d74028d11d7f5932f04e1b5db3022b51 (diff)
parent    d834c16516d1ebec4766fc58c059bf01311e6045 (diff)
Merge branch 'master' into gfs2
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                     |  12
-rw-r--r--  arch/s390/Makefile                    |   1
-rw-r--r--  arch/s390/appldata/appldata_base.c    |   6
-rw-r--r--  arch/s390/crypto/crypt_s390.h         | 204
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c          |  26
-rw-r--r--  arch/s390/hypfs/inode.c               |  17
-rw-r--r--  arch/s390/kernel/compat_linux.c       |   5
-rw-r--r--  arch/s390/kernel/compat_wrapper.S     | 442
-rw-r--r--  arch/s390/kernel/cpcmd.c              |  83
-rw-r--r--  arch/s390/kernel/entry.S              | 469
-rw-r--r--  arch/s390/kernel/entry64.S            | 443
-rw-r--r--  arch/s390/kernel/head.S               | 624
-rw-r--r--  arch/s390/kernel/head31.S             |  11
-rw-r--r--  arch/s390/kernel/head64.S             | 443
-rw-r--r--  arch/s390/kernel/ipl.c                |  21
-rw-r--r--  arch/s390/kernel/process.c            |   5
-rw-r--r--  arch/s390/kernel/reipl.S              |  75
-rw-r--r--  arch/s390/kernel/reipl64.S            |  93
-rw-r--r--  arch/s390/kernel/relocate_kernel.S    |  74
-rw-r--r--  arch/s390/kernel/relocate_kernel64.S  |  82
-rw-r--r--  arch/s390/kernel/semaphore.c          |  22
-rw-r--r--  arch/s390/kernel/setup.c              |   2
-rw-r--r--  arch/s390/kernel/smp.c                |  73
-rw-r--r--  arch/s390/kernel/time.c               |  26
-rw-r--r--  arch/s390/kernel/traps.c              |   3
-rw-r--r--  arch/s390/lib/Makefile                |   1
-rw-r--r--  arch/s390/lib/delay.c                 |  11
-rw-r--r--  arch/s390/lib/div64.c                 | 151
-rw-r--r--  arch/s390/lib/spinlock.c              |  62
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c         |  22
-rw-r--r--  arch/s390/lib/uaccess_std.c           |  36
-rw-r--r--  arch/s390/math-emu/math.c             | 126
-rw-r--r--  arch/s390/math-emu/sfp-util.h         |  73
-rw-r--r--  arch/s390/mm/extmem.c                 |  16
-rw-r--r--  arch/s390/mm/fault.c                  |  37
-rw-r--r--  arch/s390/mm/init.c                   |  41
36 files changed, 1953 insertions, 1885 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b216ca659cdf..f900a516f099 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -51,6 +51,10 @@ config 64BIT
Select this option if you have a 64 bit IBM zSeries machine
and want to use the 64 bit addressing mode.
+config 32BIT
+ bool
+ default y if !64BIT
+
config SMP
bool "Symmetric multi-processing support"
---help---
@@ -149,6 +153,14 @@ config MARCH_Z990
This will be slightly faster but does not work on
older machines such as the z900.
+config MARCH_Z9_109
+ bool "IBM System z9"
+ help
+ Select this to enable optimizations for IBM System z9-109, IBM
+ System z9 Enterprise Class (z9 EC), and IBM System z9 Business
+ Class (z9 BC). The kernel will be slightly faster but will not
+ work on older machines such as the z990, z890, z900, and z800.
+
endchoice
config PACK_STACK
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 74ef57dcfa60..5deb9f7544a1 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -33,6 +33,7 @@ endif
cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
+cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
#
# Prevent tail-call optimizations, to get clearer backtraces:
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b69ed742f981..2b1e6c9a6e0e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -16,7 +16,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
-#include <linux/page-flags.h>
+#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
@@ -157,12 +157,12 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
.prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
.prod_fn = 0xD5D3, /* "NL" */
- .record_nr = record_nr,
.version_nr = 0xF2F6, /* "26" */
.release_nr = 0xF0F1, /* "01" */
- .mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1],
};
+ id.record_nr = record_nr;
+ id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
return appldata_asm(&id, function, (void *) buffer, length);
}
/************************ timer, work, DIAG <END> ****************************/
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index efd836c2e4a6..2b137089f625 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -105,63 +105,6 @@ struct crypt_s390_query_status {
};
/*
- * Standard fixup and ex_table sections for crypt_s390 inline functions.
- * label 0: the s390 crypto operation
- * label 1: just after 1 to catch illegal operation exception
- * (unsupported model)
- * label 6: the return point after fixup
- * label 7: set error value if exception _in_ crypto operation
- * label 8: set error value if illegal operation exception
- * [ret] is the variable to receive the error code
- * [ERR] is the error code value
- */
-#ifndef CONFIG_64BIT
-#define __crypt_s390_fixup \
- ".section .fixup,\"ax\" \n" \
- "7: lhi %0,%h[e1] \n" \
- " bras 1,9f \n" \
- " .long 6b \n" \
- "8: lhi %0,%h[e2] \n" \
- " bras 1,9f \n" \
- " .long 6b \n" \
- "9: l 1,0(1) \n" \
- " br 1 \n" \
- ".previous \n" \
- ".section __ex_table,\"a\" \n" \
- " .align 4 \n" \
- " .long 0b,7b \n" \
- " .long 1b,8b \n" \
- ".previous"
-#else /* CONFIG_64BIT */
-#define __crypt_s390_fixup \
- ".section .fixup,\"ax\" \n" \
- "7: lhi %0,%h[e1] \n" \
- " jg 6b \n" \
- "8: lhi %0,%h[e2] \n" \
- " jg 6b \n" \
- ".previous\n" \
- ".section __ex_table,\"a\" \n" \
- " .align 8 \n" \
- " .quad 0b,7b \n" \
- " .quad 1b,8b \n" \
- ".previous"
-#endif /* CONFIG_64BIT */
-
-/*
- * Standard code for setting the result of s390 crypto instructions.
- * %0: the register which will receive the result
- * [result]: the register containing the result (e.g. second operand length
- * to compute number of processed bytes].
- */
-#ifndef CONFIG_64BIT
-#define __crypt_s390_set_result \
- " lr %0,%[result] \n"
-#else /* CONFIG_64BIT */
-#define __crypt_s390_set_result \
- " lgr %0,%[result] \n"
-#endif
-
-/*
* Executes the KM (CIPHER MESSAGE) operation of the CPU.
* @param func: the function code passed to KM; see crypt_s390_km_func
* @param param: address of parameter block; see POP for details on each func
@@ -176,28 +119,24 @@ crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
- register u8* __dest asm("4") = dest;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
+ register u8* __dest asm("4") = dest;
int ret;
- ret = 0;
- __asm__ __volatile__ (
- "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
+ asm volatile(
+ "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
"1: brc 1,0b \n" /* handle partial completion */
- __crypt_s390_set_result
- "6: \n"
- __crypt_s390_fixup
- : "+d" (ret), "+a" (__dest), "+a" (__src),
- [result] "+d" (__src_len)
- : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
- "a" (__param)
- : "cc", "memory"
- );
- if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
- ret = src_len - ret;
- }
- return ret;
+ " ahi %0,%h7\n"
+ "2: ahi %0,%h8\n"
+ "3:\n"
+ EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+ : "d" (__func), "a" (__param), "0" (-EFAULT),
+ "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/*
@@ -215,28 +154,24 @@ crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
{
register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
- register u8* __dest asm("4") = dest;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
+ register u8* __dest asm("4") = dest;
int ret;
- ret = 0;
- __asm__ __volatile__ (
- "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
+ asm volatile(
+ "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
"1: brc 1,0b \n" /* handle partial completion */
- __crypt_s390_set_result
- "6: \n"
- __crypt_s390_fixup
- : "+d" (ret), "+a" (__dest), "+a" (__src),
- [result] "+d" (__src_len)
- : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
- "a" (__param)
- : "cc", "memory"
- );
- if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
- ret = src_len - ret;
- }
- return ret;
+ " ahi %0,%h7\n"
+ "2: ahi %0,%h8\n"
+ "3:\n"
+ EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+ : "d" (__func), "a" (__param), "0" (-EFAULT),
+ "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/*
@@ -258,22 +193,19 @@ crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
register long __src_len asm("3") = src_len;
int ret;
- ret = 0;
- __asm__ __volatile__ (
- "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
- "1: brc 1,0b \n" /* handle partical completion */
- __crypt_s390_set_result
- "6: \n"
- __crypt_s390_fixup
- : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
- : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
- "a" (__param)
- : "cc", "memory"
- );
- if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
- ret = src_len - ret;
- }
- return ret;
+ asm volatile(
+ "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ " ahi %0,%h6\n"
+ "2: ahi %0,%h7\n"
+ "3:\n"
+ EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len)
+ : "d" (__func), "a" (__param), "0" (-EFAULT),
+ "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/*
@@ -294,22 +226,19 @@ crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
register long __src_len asm("3") = src_len;
int ret;
- ret = 0;
- __asm__ __volatile__ (
- "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
- "1: brc 1,0b \n" /* handle partical completion */
- __crypt_s390_set_result
- "6: \n"
- __crypt_s390_fixup
- : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
- : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
- "a" (__param)
- : "cc", "memory"
- );
- if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
- ret = src_len - ret;
- }
- return ret;
+ asm volatile(
+ "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ " ahi %0,%h6\n"
+ "2: ahi %0,%h7\n"
+ "3:\n"
+ EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len)
+ : "d" (__func), "a" (__param), "0" (-EFAULT),
+ "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/*
@@ -331,22 +260,19 @@ crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
register long __src_len asm("3") = src_len;
int ret;
- ret = 0;
- __asm__ __volatile__ (
- "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
- "1: brc 1,0b \n" /* handle partical completion */
- __crypt_s390_set_result
- "6: \n"
- __crypt_s390_fixup
- : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
- : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
- "a" (__param)
- : "cc", "memory"
- );
- if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
- ret = src_len - ret;
- }
- return ret;
+ asm volatile(
+ "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ " ahi %0,%h6\n"
+ "2: ahi %0,%h7\n"
+ "3:\n"
+ EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+a" (__src), "+d" (__src_len)
+ : "d" (__func), "a" (__param), "0" (-EFAULT),
+ "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
}
/**
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 75144efbb92b..443fa377d9ff 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -333,22 +333,14 @@ static int diag204(unsigned long subcode, unsigned long size, void *addr)
register unsigned long _subcode asm("0") = subcode;
register unsigned long _size asm("1") = size;
- asm volatile (" diag %2,%0,0x204\n"
- "0: \n" ".section __ex_table,\"a\"\n"
-#ifndef __s390x__
- " .align 4\n"
- " .long 0b,0b\n"
-#else
- " .align 8\n"
- " .quad 0b,0b\n"
-#endif
- ".previous":"+d" (_subcode), "+d"(_size)
- :"d"(addr)
- :"memory");
+ asm volatile(
+ " diag %2,%0,0x204\n"
+ "0:\n"
+ EX_TABLE(0b,0b)
+ : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
if (_subcode)
return -1;
- else
- return _size;
+ return _size;
}
/*
@@ -403,7 +395,8 @@ static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
*pages = 1;
return diag204_alloc_rbuf();
} else {/* INFO_EXT */
- *pages = diag204(SUBC_RSI | INFO_EXT, 0, NULL);
+ *pages = diag204((unsigned long)SUBC_RSI |
+ (unsigned long)INFO_EXT, 0, NULL);
if (*pages <= 0)
return ERR_PTR(-ENOSYS);
else
@@ -490,8 +483,7 @@ out:
static void diag224(void *ptr)
{
- asm volatile(" diag %0,%1,0x224\n"
- : :"d" (0), "d"(ptr) : "memory");
+ asm volatile("diag %0,%1,0x224" : :"d" (0), "d"(ptr) : "memory");
}
static int diag224_get_name_table(void)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 813fc21358f9..cd702ae45d6d 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -134,12 +134,20 @@ static int hypfs_open(struct inode *inode, struct file *filp)
return 0;
}
-static ssize_t hypfs_aio_read(struct kiocb *iocb, __user char *buf,
- size_t count, loff_t offset)
+static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t offset)
{
char *data;
size_t len;
struct file *filp = iocb->ki_filp;
+ /* XXX: temporary */
+ char __user *buf = iov[0].iov_base;
+ size_t count = iov[0].iov_len;
+
+ if (nr_segs != 1) {
+ count = -EINVAL;
+ goto out;
+ }
data = filp->private_data;
len = strlen(data);
@@ -158,12 +166,13 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, __user char *buf,
out:
return count;
}
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const char __user *buf,
- size_t count, loff_t pos)
+static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t offset)
{
int rc;
struct super_block *sb;
struct hypfs_sb_info *fs_info;
+ size_t count = iov_length(iov, nr_segs);
sb = iocb->ki_filp->f_dentry->d_inode->i_sb;
fs_info = sb->s_fs_info;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 91b2884fa5c4..c46e3d48e410 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -544,10 +544,7 @@ sys32_execve(struct pt_regs regs)
current->ptrace &= ~PT_DTRACE;
task_unlock(current);
current->thread.fp_regs.fpc=0;
- __asm__ __volatile__
- ("sr 0,0\n\t"
- "sfpc 0,0\n\t"
- : : :"0");
+ asm volatile("sfpc %0,0" : : "d" (0));
}
putname(filename);
out:
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 4d53b2739357..4aabeeaa7cf7 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -4,97 +4,97 @@
*
* Copyright (C) IBM Corp. 2000,2006
* Author(s): Gerhard Tonn (ton@de.ibm.com),
-* Thomas Spatzier (tspat@de.ibm.com)
-*/
+* Thomas Spatzier (tspat@de.ibm.com)
+*/
- .globl sys32_exit_wrapper
+ .globl sys32_exit_wrapper
sys32_exit_wrapper:
lgfr %r2,%r2 # int
jg sys_exit # branch to sys_exit
-
- .globl sys32_read_wrapper
+
+ .globl sys32_read_wrapper
sys32_read_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys32_read # branch to sys_read
- .globl sys32_write_wrapper
+ .globl sys32_write_wrapper
sys32_write_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
jg sys32_write # branch to system call
- .globl sys32_open_wrapper
+ .globl sys32_open_wrapper
sys32_open_wrapper:
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_open # branch to system call
- .globl sys32_close_wrapper
+ .globl sys32_close_wrapper
sys32_close_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_close # branch to system call
- .globl sys32_creat_wrapper
+ .globl sys32_creat_wrapper
sys32_creat_wrapper:
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_creat # branch to system call
- .globl sys32_link_wrapper
+ .globl sys32_link_wrapper
sys32_link_wrapper:
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_link # branch to system call
- .globl sys32_unlink_wrapper
+ .globl sys32_unlink_wrapper
sys32_unlink_wrapper:
llgtr %r2,%r2 # const char *
jg sys_unlink # branch to system call
- .globl sys32_chdir_wrapper
+ .globl sys32_chdir_wrapper
sys32_chdir_wrapper:
llgtr %r2,%r2 # const char *
jg sys_chdir # branch to system call
- .globl sys32_time_wrapper
+ .globl sys32_time_wrapper
sys32_time_wrapper:
llgtr %r2,%r2 # int *
jg compat_sys_time # branch to system call
- .globl sys32_mknod_wrapper
+ .globl sys32_mknod_wrapper
sys32_mknod_wrapper:
llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # int
+ lgfr %r3,%r3 # int
llgfr %r4,%r4 # dev
jg sys_mknod # branch to system call
- .globl sys32_chmod_wrapper
+ .globl sys32_chmod_wrapper
sys32_chmod_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # mode_t
jg sys_chmod # branch to system call
- .globl sys32_lchown16_wrapper
+ .globl sys32_lchown16_wrapper
sys32_lchown16_wrapper:
llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- llgfr %r4,%r4 # __kernel_old_uid_emu31_t
+ llgfr %r3,%r3 # __kernel_old_uid_emu31_t
+ llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_lchown16 # branch to system call
- .globl sys32_lseek_wrapper
+ .globl sys32_lseek_wrapper
sys32_lseek_wrapper:
llgfr %r2,%r2 # unsigned int
lgfr %r3,%r3 # off_t
llgfr %r4,%r4 # unsigned int
jg sys_lseek # branch to system call
-#sys32_getpid_wrapper # void
+#sys32_getpid_wrapper # void
- .globl sys32_mount_wrapper
+ .globl sys32_mount_wrapper
sys32_mount_wrapper:
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
@@ -103,19 +103,19 @@ sys32_mount_wrapper:
llgtr %r6,%r6 # void *
jg compat_sys_mount # branch to system call
- .globl sys32_oldumount_wrapper
+ .globl sys32_oldumount_wrapper
sys32_oldumount_wrapper:
llgtr %r2,%r2 # char *
jg sys_oldumount # branch to system call
- .globl sys32_setuid16_wrapper
+ .globl sys32_setuid16_wrapper
sys32_setuid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_uid_emu31_t
jg sys32_setuid16 # branch to system call
-#sys32_getuid16_wrapper # void
+#sys32_getuid16_wrapper # void
- .globl sys32_ptrace_wrapper
+ .globl sys32_ptrace_wrapper
sys32_ptrace_wrapper:
lgfr %r2,%r2 # long
lgfr %r3,%r3 # long
@@ -123,168 +123,168 @@ sys32_ptrace_wrapper:
llgfr %r5,%r5 # long
jg sys_ptrace # branch to system call
- .globl sys32_alarm_wrapper
+ .globl sys32_alarm_wrapper
sys32_alarm_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_alarm # branch to system call
-#sys32_pause_wrapper # void
+#sys32_pause_wrapper # void
- .globl compat_sys_utime_wrapper
+ .globl compat_sys_utime_wrapper
compat_sys_utime_wrapper:
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_utimbuf *
jg compat_sys_utime # branch to system call
- .globl sys32_access_wrapper
+ .globl sys32_access_wrapper
sys32_access_wrapper:
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_access # branch to system call
- .globl sys32_nice_wrapper
+ .globl sys32_nice_wrapper
sys32_nice_wrapper:
lgfr %r2,%r2 # int
jg sys_nice # branch to system call
-#sys32_sync_wrapper # void
+#sys32_sync_wrapper # void
- .globl sys32_kill_wrapper
+ .globl sys32_kill_wrapper
sys32_kill_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_kill # branch to system call
- .globl sys32_rename_wrapper
+ .globl sys32_rename_wrapper
sys32_rename_wrapper:
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_rename # branch to system call
- .globl sys32_mkdir_wrapper
+ .globl sys32_mkdir_wrapper
sys32_mkdir_wrapper:
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_mkdir # branch to system call
- .globl sys32_rmdir_wrapper
+ .globl sys32_rmdir_wrapper
sys32_rmdir_wrapper:
llgtr %r2,%r2 # const char *
jg sys_rmdir # branch to system call
- .globl sys32_dup_wrapper
+ .globl sys32_dup_wrapper
sys32_dup_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_dup # branch to system call
- .globl sys32_pipe_wrapper
+ .globl sys32_pipe_wrapper
sys32_pipe_wrapper:
llgtr %r2,%r2 # u32 *
jg sys_pipe # branch to system call
- .globl compat_sys_times_wrapper
+ .globl compat_sys_times_wrapper
compat_sys_times_wrapper:
llgtr %r2,%r2 # struct compat_tms *
jg compat_sys_times # branch to system call
- .globl sys32_brk_wrapper
+ .globl sys32_brk_wrapper
sys32_brk_wrapper:
llgtr %r2,%r2 # unsigned long
jg sys_brk # branch to system call
- .globl sys32_setgid16_wrapper
+ .globl sys32_setgid16_wrapper
sys32_setgid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_gid_emu31_t
jg sys32_setgid16 # branch to system call
-#sys32_getgid16_wrapper # void
+#sys32_getgid16_wrapper # void
.globl sys32_signal_wrapper
sys32_signal_wrapper:
- lgfr %r2,%r2 # int
+ lgfr %r2,%r2 # int
llgtr %r3,%r3 # __sighandler_t
jg sys_signal
-#sys32_geteuid16_wrapper # void
+#sys32_geteuid16_wrapper # void
-#sys32_getegid16_wrapper # void
+#sys32_getegid16_wrapper # void
- .globl sys32_acct_wrapper
+ .globl sys32_acct_wrapper
sys32_acct_wrapper:
llgtr %r2,%r2 # char *
jg sys_acct # branch to system call
- .globl sys32_umount_wrapper
+ .globl sys32_umount_wrapper
sys32_umount_wrapper:
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_umount # branch to system call
- .globl compat_sys_ioctl_wrapper
+ .globl compat_sys_ioctl_wrapper
compat_sys_ioctl_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned int
jg compat_sys_ioctl # branch to system call
- .globl compat_sys_fcntl_wrapper
+ .globl compat_sys_fcntl_wrapper
compat_sys_fcntl_wrapper:
llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
+ llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned long
jg compat_sys_fcntl # branch to system call
- .globl sys32_setpgid_wrapper
+ .globl sys32_setpgid_wrapper
sys32_setpgid_wrapper:
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # pid_t
jg sys_setpgid # branch to system call
- .globl sys32_umask_wrapper
+ .globl sys32_umask_wrapper
sys32_umask_wrapper:
lgfr %r2,%r2 # int
jg sys_umask # branch to system call
- .globl sys32_chroot_wrapper
+ .globl sys32_chroot_wrapper
sys32_chroot_wrapper:
llgtr %r2,%r2 # char *
jg sys_chroot # branch to system call
.globl sys32_ustat_wrapper
sys32_ustat_wrapper:
- llgfr %r2,%r2 # dev_t
+ llgfr %r2,%r2 # dev_t
llgtr %r3,%r3 # struct ustat *
jg sys_ustat
- .globl sys32_dup2_wrapper
+ .globl sys32_dup2_wrapper
sys32_dup2_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_dup2 # branch to system call
-#sys32_getppid_wrapper # void
+#sys32_getppid_wrapper # void
-#sys32_getpgrp_wrapper # void
+#sys32_getpgrp_wrapper # void
-#sys32_setsid_wrapper # void
+#sys32_setsid_wrapper # void
- .globl sys32_sigaction_wrapper
+ .globl sys32_sigaction_wrapper
sys32_sigaction_wrapper:
- lgfr %r2,%r2 # int
+ lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct old_sigaction *
llgtr %r4,%r4 # struct old_sigaction32 *
jg sys32_sigaction # branch to system call
- .globl sys32_setreuid16_wrapper
+ .globl sys32_setreuid16_wrapper
sys32_setreuid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_uid_emu31_t
+ llgfr %r3,%r3 # __kernel_old_uid_emu31_t
jg sys32_setreuid16 # branch to system call
- .globl sys32_setregid16_wrapper
+ .globl sys32_setregid16_wrapper
sys32_setregid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
- llgfr %r3,%r3 # __kernel_old_gid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_gid_emu31_t
+ llgfr %r3,%r3 # __kernel_old_gid_emu31_t
jg sys32_setregid16 # branch to system call
.globl sys_sigsuspend_wrapper
@@ -294,95 +294,95 @@ sys_sigsuspend_wrapper:
llgfr %r4,%r4 # old_sigset_t
jg sys_sigsuspend
- .globl compat_sys_sigpending_wrapper
+ .globl compat_sys_sigpending_wrapper
compat_sys_sigpending_wrapper:
llgtr %r2,%r2 # compat_old_sigset_t *
jg compat_sys_sigpending # branch to system call
- .globl sys32_sethostname_wrapper
+ .globl sys32_sethostname_wrapper
sys32_sethostname_wrapper:
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_sethostname # branch to system call
- .globl compat_sys_setrlimit_wrapper
+ .globl compat_sys_setrlimit_wrapper
compat_sys_setrlimit_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_setrlimit # branch to system call
- .globl compat_sys_old_getrlimit_wrapper
+ .globl compat_sys_old_getrlimit_wrapper
compat_sys_old_getrlimit_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_old_getrlimit # branch to system call
- .globl compat_sys_getrlimit_wrapper
+ .globl compat_sys_getrlimit_wrapper
compat_sys_getrlimit_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_getrlimit # branch to system call
- .globl sys32_mmap2_wrapper
+ .globl sys32_mmap2_wrapper
sys32_mmap2_wrapper:
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
jg sys32_mmap2 # branch to system call
- .globl compat_sys_getrusage_wrapper
+ .globl compat_sys_getrusage_wrapper
compat_sys_getrusage_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct rusage_emu31 *
jg compat_sys_getrusage # branch to system call
- .globl sys32_gettimeofday_wrapper
+ .globl sys32_gettimeofday_wrapper
sys32_gettimeofday_wrapper:
llgtr %r2,%r2 # struct timeval_emu31 *
llgtr %r3,%r3 # struct timezone *
jg sys32_gettimeofday # branch to system call
- .globl sys32_settimeofday_wrapper
+ .globl sys32_settimeofday_wrapper
sys32_settimeofday_wrapper:
llgtr %r2,%r2 # struct timeval_emu31 *
llgtr %r3,%r3 # struct timezone *
jg sys32_settimeofday # branch to system call
- .globl sys32_getgroups16_wrapper
+ .globl sys32_getgroups16_wrapper
sys32_getgroups16_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
jg sys32_getgroups16 # branch to system call
- .globl sys32_setgroups16_wrapper
+ .globl sys32_setgroups16_wrapper
sys32_setgroups16_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
jg sys32_setgroups16 # branch to system call
- .globl sys32_symlink_wrapper
+ .globl sys32_symlink_wrapper
sys32_symlink_wrapper:
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_symlink # branch to system call
- .globl sys32_readlink_wrapper
+ .globl sys32_readlink_wrapper
sys32_readlink_wrapper:
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # char *
lgfr %r4,%r4 # int
jg sys_readlink # branch to system call
- .globl sys32_uselib_wrapper
+ .globl sys32_uselib_wrapper
sys32_uselib_wrapper:
llgtr %r2,%r2 # const char *
jg sys_uselib # branch to system call
- .globl sys32_swapon_wrapper
+ .globl sys32_swapon_wrapper
sys32_swapon_wrapper:
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_swapon # branch to system call
- .globl sys32_reboot_wrapper
+ .globl sys32_reboot_wrapper
sys32_reboot_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
@@ -390,121 +390,121 @@ sys32_reboot_wrapper:
llgtr %r5,%r5 # void *
jg sys_reboot # branch to system call
- .globl old32_readdir_wrapper
+ .globl old32_readdir_wrapper
old32_readdir_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg compat_sys_old_readdir # branch to system call
- .globl old32_mmap_wrapper
+ .globl old32_mmap_wrapper
old32_mmap_wrapper:
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
jg old32_mmap # branch to system call
- .globl sys32_munmap_wrapper
+ .globl sys32_munmap_wrapper
sys32_munmap_wrapper:
llgfr %r2,%r2 # unsigned long
- llgfr %r3,%r3 # size_t
+ llgfr %r3,%r3 # size_t
jg sys_munmap # branch to system call
- .globl sys32_truncate_wrapper
+ .globl sys32_truncate_wrapper
sys32_truncate_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned long
jg sys_truncate # branch to system call
- .globl sys32_ftruncate_wrapper
+ .globl sys32_ftruncate_wrapper
sys32_ftruncate_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
jg sys_ftruncate # branch to system call
- .globl sys32_fchmod_wrapper
+ .globl sys32_fchmod_wrapper
sys32_fchmod_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # mode_t
jg sys_fchmod # branch to system call
- .globl sys32_fchown16_wrapper
+ .globl sys32_fchown16_wrapper
sys32_fchown16_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # compat_uid_t
llgfr %r4,%r4 # compat_uid_t
jg sys32_fchown16 # branch to system call
- .globl sys32_getpriority_wrapper
+ .globl sys32_getpriority_wrapper
sys32_getpriority_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_getpriority # branch to system call
- .globl sys32_setpriority_wrapper
+ .globl sys32_setpriority_wrapper
sys32_setpriority_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_setpriority # branch to system call
- .globl compat_sys_statfs_wrapper
+ .globl compat_sys_statfs_wrapper
compat_sys_statfs_wrapper:
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_statfs *
jg compat_sys_statfs # branch to system call
- .globl compat_sys_fstatfs_wrapper
+ .globl compat_sys_fstatfs_wrapper
compat_sys_fstatfs_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct compat_statfs *
jg compat_sys_fstatfs # branch to system call
- .globl compat_sys_socketcall_wrapper
+ .globl compat_sys_socketcall_wrapper
compat_sys_socketcall_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # u32 *
jg compat_sys_socketcall # branch to system call
- .globl sys32_syslog_wrapper
+ .globl sys32_syslog_wrapper
sys32_syslog_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
lgfr %r4,%r4 # int
jg sys_syslog # branch to system call
- .globl compat_sys_setitimer_wrapper
+ .globl compat_sys_setitimer_wrapper
compat_sys_setitimer_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct itimerval_emu31 *
llgtr %r4,%r4 # struct itimerval_emu31 *
jg compat_sys_setitimer # branch to system call
- .globl compat_sys_getitimer_wrapper
+ .globl compat_sys_getitimer_wrapper
compat_sys_getitimer_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct itimerval_emu31 *
jg compat_sys_getitimer # branch to system call
- .globl compat_sys_newstat_wrapper
+ .globl compat_sys_newstat_wrapper
compat_sys_newstat_wrapper:
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newstat # branch to system call
- .globl compat_sys_newlstat_wrapper
+ .globl compat_sys_newlstat_wrapper
compat_sys_newlstat_wrapper:
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newlstat # branch to system call
- .globl compat_sys_newfstat_wrapper
+ .globl compat_sys_newfstat_wrapper
compat_sys_newfstat_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newfstat # branch to system call
-#sys32_vhangup_wrapper # void
+#sys32_vhangup_wrapper # void
- .globl compat_sys_wait4_wrapper
+ .globl compat_sys_wait4_wrapper
compat_sys_wait4_wrapper:
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # unsigned int *
@@ -512,17 +512,17 @@ compat_sys_wait4_wrapper:
llgtr %r5,%r5 # struct rusage *
jg compat_sys_wait4 # branch to system call
- .globl sys32_swapoff_wrapper
+ .globl sys32_swapoff_wrapper
sys32_swapoff_wrapper:
llgtr %r2,%r2 # const char *
jg sys_swapoff # branch to system call
- .globl sys32_sysinfo_wrapper
+ .globl sys32_sysinfo_wrapper
sys32_sysinfo_wrapper:
llgtr %r2,%r2 # struct sysinfo_emu31 *
jg sys32_sysinfo # branch to system call
- .globl sys32_ipc_wrapper
+ .globl sys32_ipc_wrapper
sys32_ipc_wrapper:
llgfr %r2,%r2 # uint
lgfr %r3,%r3 # int
@@ -531,59 +531,59 @@ sys32_ipc_wrapper:
llgfr %r6,%r6 # u32
jg sys32_ipc # branch to system call
- .globl sys32_fsync_wrapper
+ .globl sys32_fsync_wrapper
sys32_fsync_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_fsync # branch to system call
-#sys32_sigreturn_wrapper # done in sigreturn_glue
+#sys32_sigreturn_wrapper # done in sigreturn_glue
-#sys32_clone_wrapper # done in clone_glue
+#sys32_clone_wrapper # done in clone_glue
- .globl sys32_setdomainname_wrapper
+ .globl sys32_setdomainname_wrapper
sys32_setdomainname_wrapper:
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_setdomainname # branch to system call
- .globl sys32_newuname_wrapper
+ .globl sys32_newuname_wrapper
sys32_newuname_wrapper:
llgtr %r2,%r2 # struct new_utsname *
jg s390x_newuname # branch to system call
- .globl compat_sys_adjtimex_wrapper
+ .globl compat_sys_adjtimex_wrapper
compat_sys_adjtimex_wrapper:
llgtr %r2,%r2 # struct compat_timex *
jg compat_sys_adjtimex # branch to system call
- .globl sys32_mprotect_wrapper
+ .globl sys32_mprotect_wrapper
sys32_mprotect_wrapper:
llgtr %r2,%r2 # unsigned long (actually pointer
llgfr %r3,%r3 # size_t
llgfr %r4,%r4 # unsigned long
jg sys_mprotect # branch to system call
- .globl compat_sys_sigprocmask_wrapper
+ .globl compat_sys_sigprocmask_wrapper
compat_sys_sigprocmask_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_old_sigset_t *
llgtr %r4,%r4 # compat_old_sigset_t *
jg compat_sys_sigprocmask # branch to system call
- .globl sys32_init_module_wrapper
+ .globl sys32_init_module_wrapper
sys32_init_module_wrapper:
llgtr %r2,%r2 # void *
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # char *
jg sys32_init_module # branch to system call
- .globl sys32_delete_module_wrapper
+ .globl sys32_delete_module_wrapper
sys32_delete_module_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned int
jg sys32_delete_module # branch to system call
- .globl sys32_quotactl_wrapper
+ .globl sys32_quotactl_wrapper
sys32_quotactl_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
@@ -591,45 +591,45 @@ sys32_quotactl_wrapper:
llgtr %r5,%r5 # caddr_t
jg sys_quotactl # branch to system call
- .globl sys32_getpgid_wrapper
+ .globl sys32_getpgid_wrapper
sys32_getpgid_wrapper:
lgfr %r2,%r2 # pid_t
jg sys_getpgid # branch to system call
- .globl sys32_fchdir_wrapper
+ .globl sys32_fchdir_wrapper
sys32_fchdir_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_fchdir # branch to system call
- .globl sys32_bdflush_wrapper
+ .globl sys32_bdflush_wrapper
sys32_bdflush_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # long
jg sys_bdflush # branch to system call
- .globl sys32_sysfs_wrapper
+ .globl sys32_sysfs_wrapper
sys32_sysfs_wrapper:
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys_sysfs # branch to system call
- .globl sys32_personality_wrapper
+ .globl sys32_personality_wrapper
sys32_personality_wrapper:
llgfr %r2,%r2 # unsigned long
jg s390x_personality # branch to system call
- .globl sys32_setfsuid16_wrapper
+ .globl sys32_setfsuid16_wrapper
sys32_setfsuid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_uid_emu31_t
jg sys32_setfsuid16 # branch to system call
- .globl sys32_setfsgid16_wrapper
+ .globl sys32_setfsgid16_wrapper
sys32_setfsgid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_gid_emu31_t
jg sys32_setfsgid16 # branch to system call
- .globl sys32_llseek_wrapper
+ .globl sys32_llseek_wrapper
sys32_llseek_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
@@ -638,14 +638,14 @@ sys32_llseek_wrapper:
llgfr %r6,%r6 # unsigned int
jg sys_llseek # branch to system call
- .globl sys32_getdents_wrapper
+ .globl sys32_getdents_wrapper
sys32_getdents_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg compat_sys_getdents # branch to system call
- .globl compat_sys_select_wrapper
+ .globl compat_sys_select_wrapper
compat_sys_select_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_fd_set *
@@ -654,113 +654,113 @@ compat_sys_select_wrapper:
llgtr %r6,%r6 # struct compat_timeval *
jg compat_sys_select # branch to system call
- .globl sys32_flock_wrapper
+ .globl sys32_flock_wrapper
sys32_flock_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_flock # branch to system call
- .globl sys32_msync_wrapper
+ .globl sys32_msync_wrapper
sys32_msync_wrapper:
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
lgfr %r4,%r4 # int
jg sys_msync # branch to system call
- .globl compat_sys_readv_wrapper
+ .globl compat_sys_readv_wrapper
compat_sys_readv_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct compat_iovec *
llgfr %r4,%r4 # unsigned long
jg compat_sys_readv # branch to system call
- .globl compat_sys_writev_wrapper
+ .globl compat_sys_writev_wrapper
compat_sys_writev_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct compat_iovec *
llgfr %r4,%r4 # unsigned long
jg compat_sys_writev # branch to system call
- .globl sys32_getsid_wrapper
+ .globl sys32_getsid_wrapper
sys32_getsid_wrapper:
lgfr %r2,%r2 # pid_t
jg sys_getsid # branch to system call
- .globl sys32_fdatasync_wrapper
+ .globl sys32_fdatasync_wrapper
sys32_fdatasync_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_fdatasync # branch to system call
-#sys32_sysctl_wrapper # tbd
+#sys32_sysctl_wrapper # tbd
- .globl sys32_mlock_wrapper
+ .globl sys32_mlock_wrapper
sys32_mlock_wrapper:
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_mlock # branch to system call
- .globl sys32_munlock_wrapper
+ .globl sys32_munlock_wrapper
sys32_munlock_wrapper:
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_munlock # branch to system call
- .globl sys32_mlockall_wrapper
+ .globl sys32_mlockall_wrapper
sys32_mlockall_wrapper:
lgfr %r2,%r2 # int
jg sys_mlockall # branch to system call
-#sys32_munlockall_wrapper # void
+#sys32_munlockall_wrapper # void
- .globl sys32_sched_setparam_wrapper
+ .globl sys32_sched_setparam_wrapper
sys32_sched_setparam_wrapper:
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct sched_param *
jg sys_sched_setparam # branch to system call
- .globl sys32_sched_getparam_wrapper
+ .globl sys32_sched_getparam_wrapper
sys32_sched_getparam_wrapper:
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct sched_param *
jg sys_sched_getparam # branch to system call
- .globl sys32_sched_setscheduler_wrapper
+ .globl sys32_sched_setscheduler_wrapper
sys32_sched_setscheduler_wrapper:
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct sched_param *
jg sys_sched_setscheduler # branch to system call
- .globl sys32_sched_getscheduler_wrapper
+ .globl sys32_sched_getscheduler_wrapper
sys32_sched_getscheduler_wrapper:
lgfr %r2,%r2 # pid_t
jg sys_sched_getscheduler # branch to system call
-#sys32_sched_yield_wrapper # void
+#sys32_sched_yield_wrapper # void
- .globl sys32_sched_get_priority_max_wrapper
+ .globl sys32_sched_get_priority_max_wrapper
sys32_sched_get_priority_max_wrapper:
lgfr %r2,%r2 # int
jg sys_sched_get_priority_max # branch to system call
- .globl sys32_sched_get_priority_min_wrapper
+ .globl sys32_sched_get_priority_min_wrapper
sys32_sched_get_priority_min_wrapper:
lgfr %r2,%r2 # int
jg sys_sched_get_priority_min # branch to system call
- .globl sys32_sched_rr_get_interval_wrapper
+ .globl sys32_sched_rr_get_interval_wrapper
sys32_sched_rr_get_interval_wrapper:
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct compat_timespec *
jg sys32_sched_rr_get_interval # branch to system call
- .globl compat_sys_nanosleep_wrapper
+ .globl compat_sys_nanosleep_wrapper
compat_sys_nanosleep_wrapper:
llgtr %r2,%r2 # struct compat_timespec *
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_nanosleep # branch to system call
- .globl sys32_mremap_wrapper
+ .globl sys32_mremap_wrapper
sys32_mremap_wrapper:
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
@@ -769,49 +769,49 @@ sys32_mremap_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_mremap # branch to system call
- .globl sys32_setresuid16_wrapper
+ .globl sys32_setresuid16_wrapper
sys32_setresuid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_uid_emu31_t
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- llgfr %r4,%r4 # __kernel_old_uid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_uid_emu31_t
+ llgfr %r3,%r3 # __kernel_old_uid_emu31_t
+ llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_setresuid16 # branch to system call
- .globl sys32_getresuid16_wrapper
+ .globl sys32_getresuid16_wrapper
sys32_getresuid16_wrapper:
llgtr %r2,%r2 # __kernel_old_uid_emu31_t *
llgtr %r3,%r3 # __kernel_old_uid_emu31_t *
llgtr %r4,%r4 # __kernel_old_uid_emu31_t *
jg sys32_getresuid16 # branch to system call
- .globl sys32_poll_wrapper
+ .globl sys32_poll_wrapper
sys32_poll_wrapper:
- llgtr %r2,%r2 # struct pollfd *
- llgfr %r3,%r3 # unsigned int
- lgfr %r4,%r4 # long
+ llgtr %r2,%r2 # struct pollfd *
+ llgfr %r3,%r3 # unsigned int
+ lgfr %r4,%r4 # long
jg sys_poll # branch to system call
- .globl compat_sys_nfsservctl_wrapper
+ .globl compat_sys_nfsservctl_wrapper
compat_sys_nfsservctl_wrapper:
- lgfr %r2,%r2 # int
+ lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_nfsctl_arg*
llgtr %r4,%r4 # union compat_nfsctl_res*
jg compat_sys_nfsservctl # branch to system call
- .globl sys32_setresgid16_wrapper
+ .globl sys32_setresgid16_wrapper
sys32_setresgid16_wrapper:
- llgfr %r2,%r2 # __kernel_old_gid_emu31_t
- llgfr %r3,%r3 # __kernel_old_gid_emu31_t
- llgfr %r4,%r4 # __kernel_old_gid_emu31_t
+ llgfr %r2,%r2 # __kernel_old_gid_emu31_t
+ llgfr %r3,%r3 # __kernel_old_gid_emu31_t
+ llgfr %r4,%r4 # __kernel_old_gid_emu31_t
jg sys32_setresgid16 # branch to system call
- .globl sys32_getresgid16_wrapper
+ .globl sys32_getresgid16_wrapper
sys32_getresgid16_wrapper:
llgtr %r2,%r2 # __kernel_old_gid_emu31_t *
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
llgtr %r4,%r4 # __kernel_old_gid_emu31_t *
jg sys32_getresgid16 # branch to system call
- .globl sys32_prctl_wrapper
+ .globl sys32_prctl_wrapper
sys32_prctl_wrapper:
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned long
@@ -820,9 +820,9 @@ sys32_prctl_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_prctl # branch to system call
-#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
+#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
- .globl sys32_rt_sigaction_wrapper
+ .globl sys32_rt_sigaction_wrapper
sys32_rt_sigaction_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct sigaction_emu31 *
@@ -830,7 +830,7 @@ sys32_rt_sigaction_wrapper:
llgfr %r5,%r5 # size_t
jg sys32_rt_sigaction # branch to system call
- .globl sys32_rt_sigprocmask_wrapper
+ .globl sys32_rt_sigprocmask_wrapper
sys32_rt_sigprocmask_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # old_sigset_emu31 *
@@ -838,13 +838,13 @@ sys32_rt_sigprocmask_wrapper:
llgfr %r5,%r5 # size_t
jg sys32_rt_sigprocmask # branch to system call
- .globl sys32_rt_sigpending_wrapper
+ .globl sys32_rt_sigpending_wrapper
sys32_rt_sigpending_wrapper:
llgtr %r2,%r2 # sigset_emu31 *
llgfr %r3,%r3 # size_t
jg sys32_rt_sigpending # branch to system call
- .globl compat_sys_rt_sigtimedwait_wrapper
+ .globl compat_sys_rt_sigtimedwait_wrapper
compat_sys_rt_sigtimedwait_wrapper:
llgtr %r2,%r2 # const sigset_emu31_t *
llgtr %r3,%r3 # siginfo_emu31_t *
@@ -852,7 +852,7 @@ compat_sys_rt_sigtimedwait_wrapper:
llgfr %r5,%r5 # size_t
jg compat_sys_rt_sigtimedwait # branch to system call
- .globl sys32_rt_sigqueueinfo_wrapper
+ .globl sys32_rt_sigqueueinfo_wrapper
sys32_rt_sigqueueinfo_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
@@ -865,7 +865,7 @@ compat_sys_rt_sigsuspend_wrapper:
llgfr %r3,%r3 # compat_size_t
jg compat_sys_rt_sigsuspend
- .globl sys32_pread64_wrapper
+ .globl sys32_pread64_wrapper
sys32_pread64_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
@@ -874,7 +874,7 @@ sys32_pread64_wrapper:
llgfr %r6,%r6 # u32
jg sys32_pread64 # branch to system call
- .globl sys32_pwrite64_wrapper
+ .globl sys32_pwrite64_wrapper
sys32_pwrite64_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
@@ -883,26 +883,26 @@ sys32_pwrite64_wrapper:
llgfr %r6,%r6 # u32
jg sys32_pwrite64 # branch to system call
- .globl sys32_chown16_wrapper
+ .globl sys32_chown16_wrapper
sys32_chown16_wrapper:
llgtr %r2,%r2 # const char *
- llgfr %r3,%r3 # __kernel_old_uid_emu31_t
- llgfr %r4,%r4 # __kernel_old_gid_emu31_t
+ llgfr %r3,%r3 # __kernel_old_uid_emu31_t
+ llgfr %r4,%r4 # __kernel_old_gid_emu31_t
jg sys32_chown16 # branch to system call
- .globl sys32_getcwd_wrapper
+ .globl sys32_getcwd_wrapper
sys32_getcwd_wrapper:
llgtr %r2,%r2 # char *
llgfr %r3,%r3 # unsigned long
jg sys_getcwd # branch to system call
- .globl sys32_capget_wrapper
+ .globl sys32_capget_wrapper
sys32_capget_wrapper:
llgtr %r2,%r2 # cap_user_header_t
llgtr %r3,%r3 # cap_user_data_t
jg sys_capget # branch to system call
- .globl sys32_capset_wrapper
+ .globl sys32_capset_wrapper
sys32_capset_wrapper:
llgtr %r2,%r2 # cap_user_header_t
llgtr %r3,%r3 # const cap_user_data_t
@@ -910,11 +910,11 @@ sys32_capset_wrapper:
.globl sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
- llgtr %r2,%r2 # const stack_emu31_t *
- llgtr %r3,%r3 # stack_emu31_t *
+ llgtr %r2,%r2 # const stack_emu31_t *
+ llgtr %r3,%r3 # stack_emu31_t *
jg sys32_sigaltstack
- .globl sys32_sendfile_wrapper
+ .globl sys32_sendfile_wrapper
sys32_sendfile_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
@@ -922,33 +922,33 @@ sys32_sendfile_wrapper:
llgfr %r5,%r5 # size_t
jg sys32_sendfile # branch to system call
-#sys32_vfork_wrapper # done in vfork_glue
+#sys32_vfork_wrapper # done in vfork_glue
- .globl sys32_truncate64_wrapper
+ .globl sys32_truncate64_wrapper
sys32_truncate64_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys32_truncate64 # branch to system call
- .globl sys32_ftruncate64_wrapper
+ .globl sys32_ftruncate64_wrapper
sys32_ftruncate64_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys32_ftruncate64 # branch to system call
- .globl sys32_lchown_wrapper
+ .globl sys32_lchown_wrapper
sys32_lchown_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_lchown # branch to system call
-#sys32_getuid_wrapper # void
-#sys32_getgid_wrapper # void
-#sys32_geteuid_wrapper # void
-#sys32_getegid_wrapper # void
+#sys32_getuid_wrapper # void
+#sys32_getgid_wrapper # void
+#sys32_geteuid_wrapper # void
+#sys32_getegid_wrapper # void
.globl sys32_setreuid_wrapper
sys32_setreuid_wrapper:
@@ -962,111 +962,111 @@ sys32_setregid_wrapper:
llgfr %r3,%r3 # gid_t
jg sys_setregid # branch to system call
- .globl sys32_getgroups_wrapper
+ .globl sys32_getgroups_wrapper
sys32_getgroups_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # gid_t *
jg sys_getgroups # branch to system call
- .globl sys32_setgroups_wrapper
+ .globl sys32_setgroups_wrapper
sys32_setgroups_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # gid_t *
jg sys_setgroups # branch to system call
- .globl sys32_fchown_wrapper
+ .globl sys32_fchown_wrapper
sys32_fchown_wrapper:
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_fchown # branch to system call
- .globl sys32_setresuid_wrapper
+ .globl sys32_setresuid_wrapper
sys32_setresuid_wrapper:
llgfr %r2,%r2 # uid_t
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # uid_t
jg sys_setresuid # branch to system call
- .globl sys32_getresuid_wrapper
+ .globl sys32_getresuid_wrapper
sys32_getresuid_wrapper:
llgtr %r2,%r2 # uid_t *
llgtr %r3,%r3 # uid_t *
llgtr %r4,%r4 # uid_t *
jg sys_getresuid # branch to system call
- .globl sys32_setresgid_wrapper
+ .globl sys32_setresgid_wrapper
sys32_setresgid_wrapper:
llgfr %r2,%r2 # gid_t
llgfr %r3,%r3 # gid_t
llgfr %r4,%r4 # gid_t
jg sys_setresgid # branch to system call
- .globl sys32_getresgid_wrapper
+ .globl sys32_getresgid_wrapper
sys32_getresgid_wrapper:
llgtr %r2,%r2 # gid_t *
llgtr %r3,%r3 # gid_t *
llgtr %r4,%r4 # gid_t *
jg sys_getresgid # branch to system call
- .globl sys32_chown_wrapper
+ .globl sys32_chown_wrapper
sys32_chown_wrapper:
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_chown # branch to system call
- .globl sys32_setuid_wrapper
+ .globl sys32_setuid_wrapper
sys32_setuid_wrapper:
llgfr %r2,%r2 # uid_t
jg sys_setuid # branch to system call
- .globl sys32_setgid_wrapper
+ .globl sys32_setgid_wrapper
sys32_setgid_wrapper:
llgfr %r2,%r2 # gid_t
jg sys_setgid # branch to system call
- .globl sys32_setfsuid_wrapper
+ .globl sys32_setfsuid_wrapper
sys32_setfsuid_wrapper:
llgfr %r2,%r2 # uid_t
jg sys_setfsuid # branch to system call
- .globl sys32_setfsgid_wrapper
+ .globl sys32_setfsgid_wrapper
sys32_setfsgid_wrapper:
llgfr %r2,%r2 # gid_t
jg sys_setfsgid # branch to system call
- .globl sys32_pivot_root_wrapper
+ .globl sys32_pivot_root_wrapper
sys32_pivot_root_wrapper:
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_pivot_root # branch to system call
- .globl sys32_mincore_wrapper
+ .globl sys32_mincore_wrapper
sys32_mincore_wrapper:
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
llgtr %r4,%r4 # unsigned char *
jg sys_mincore # branch to system call
- .globl sys32_madvise_wrapper
+ .globl sys32_madvise_wrapper
sys32_madvise_wrapper:
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
lgfr %r4,%r4 # int
jg sys_madvise # branch to system call
- .globl sys32_getdents64_wrapper
+ .globl sys32_getdents64_wrapper
sys32_getdents64_wrapper:
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg sys_getdents64 # branch to system call
- .globl compat_sys_fcntl64_wrapper
+ .globl compat_sys_fcntl64_wrapper
compat_sys_fcntl64_wrapper:
llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned int
+ llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned long
jg compat_sys_fcntl64 # branch to system call
@@ -1087,10 +1087,10 @@ sys32_stime_wrapper:
llgtr %r2,%r2 # long *
jg compat_sys_stime # branch to system call
- .globl sys32_sysctl_wrapper
+ .globl sys32_sysctl_wrapper
sys32_sysctl_wrapper:
- llgtr %r2,%r2 # struct __sysctl_args32 *
- jg sys32_sysctl
+ llgtr %r2,%r2 # struct __sysctl_args32 *
+ jg sys32_sysctl
.globl sys32_fstat64_wrapper
sys32_fstat64_wrapper:
@@ -1098,7 +1098,7 @@ sys32_fstat64_wrapper:
llgtr %r3,%r3 # struct stat64 *
jg sys32_fstat64 # branch to system call
- .globl compat_sys_futex_wrapper
+ .globl compat_sys_futex_wrapper
compat_sys_futex_wrapper:
llgtr %r2,%r2 # u32 *
lgfr %r3,%r3 # int
@@ -1213,22 +1213,22 @@ sys32_sched_getaffinity_wrapper:
llgtr %r4,%r4 # unsigned long *
jg compat_sys_sched_getaffinity
- .globl sys32_exit_group_wrapper
+ .globl sys32_exit_group_wrapper
sys32_exit_group_wrapper:
lgfr %r2,%r2 # int
jg sys_exit_group # branch to system call
- .globl sys32_set_tid_address_wrapper
+ .globl sys32_set_tid_address_wrapper
sys32_set_tid_address_wrapper:
llgtr %r2,%r2 # int *
jg sys_set_tid_address # branch to system call
- .globl sys_epoll_create_wrapper
+ .globl sys_epoll_create_wrapper
sys_epoll_create_wrapper:
lgfr %r2,%r2 # int
jg sys_epoll_create # branch to system call
- .globl sys_epoll_ctl_wrapper
+ .globl sys_epoll_ctl_wrapper
sys_epoll_ctl_wrapper:
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
@@ -1236,7 +1236,7 @@ sys_epoll_ctl_wrapper:
llgtr %r5,%r5 # struct epoll_event *
jg sys_epoll_ctl # branch to system call
- .globl sys_epoll_wait_wrapper
+ .globl sys_epoll_wait_wrapper
sys_epoll_wait_wrapper:
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct epoll_event *
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 4ef44e536b2c..1eae74e72f95 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -25,11 +25,8 @@ static char cpcmd_buf[241];
*/
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
- const int mask = 0x40000000L;
- unsigned long flags;
- int return_code;
- int return_len;
- int cmdlen;
+ unsigned long flags, cmdlen;
+ int return_code, return_len;
spin_lock_irqsave(&cpcmd_lock, flags);
cmdlen = strlen(cmd);
@@ -38,64 +35,44 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
ASCEBC(cpcmd_buf, cmdlen);
if (response != NULL && rlen > 0) {
+ register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+ register unsigned long reg3 asm ("3") = (addr_t) response;
+ register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
+ register unsigned long reg5 asm ("5") = rlen;
+
memset(response, 0, rlen);
+ asm volatile(
#ifndef CONFIG_64BIT
- asm volatile ( "lra 2,0(%2)\n"
- "lr 4,%3\n"
- "o 4,%6\n"
- "lra 3,0(%4)\n"
- "lr 5,%5\n"
- "diag 2,4,0x8\n"
- "brc 8, 1f\n"
- "ar 5, %5\n"
- "1: \n"
- "lr %0,4\n"
- "lr %1,5\n"
- : "=d" (return_code), "=d" (return_len)
- : "a" (cpcmd_buf), "d" (cmdlen),
- "a" (response), "d" (rlen), "m" (mask)
- : "cc", "2", "3", "4", "5" );
+ " diag %2,%0,0x8\n"
+ " brc 8,1f\n"
+ " ar %1,%4\n"
#else /* CONFIG_64BIT */
- asm volatile ( "lrag 2,0(%2)\n"
- "lgr 4,%3\n"
- "o 4,%6\n"
- "lrag 3,0(%4)\n"
- "lgr 5,%5\n"
- "sam31\n"
- "diag 2,4,0x8\n"
- "sam64\n"
- "brc 8, 1f\n"
- "agr 5, %5\n"
- "1: \n"
- "lgr %0,4\n"
- "lgr %1,5\n"
- : "=d" (return_code), "=d" (return_len)
- : "a" (cpcmd_buf), "d" (cmdlen),
- "a" (response), "d" (rlen), "m" (mask)
- : "cc", "2", "3", "4", "5" );
+ " sam31\n"
+ " diag %2,%0,0x8\n"
+ " sam64\n"
+ " brc 8,1f\n"
+ " agr %1,%4\n"
#endif /* CONFIG_64BIT */
+ "1:\n"
+ : "+d" (reg4), "+d" (reg5)
+ : "d" (reg2), "d" (reg3), "d" (rlen) : "cc");
+ return_code = (int) reg4;
+ return_len = (int) reg5;
EBCASC(response, rlen);
} else {
+ register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+ register unsigned long reg3 asm ("3") = cmdlen;
return_len = 0;
+ asm volatile(
#ifndef CONFIG_64BIT
- asm volatile ( "lra 2,0(%1)\n"
- "lr 3,%2\n"
- "diag 2,3,0x8\n"
- "lr %0,3\n"
- : "=d" (return_code)
- : "a" (cpcmd_buf), "d" (cmdlen)
- : "2", "3" );
+ " diag %1,%0,0x8\n"
#else /* CONFIG_64BIT */
- asm volatile ( "lrag 2,0(%1)\n"
- "lgr 3,%2\n"
- "sam31\n"
- "diag 2,3,0x8\n"
- "sam64\n"
- "lgr %0,3\n"
- : "=d" (return_code)
- : "a" (cpcmd_buf), "d" (cmdlen)
- : "2", "3" );
+ " sam31\n"
+ " diag %1,%0,0x8\n"
+ " sam64\n"
#endif /* CONFIG_64BIT */
+ : "+d" (reg3) : "d" (reg2) : "cc");
+ return_code = (int) reg3;
}
spin_unlock_irqrestore(&cpcmd_lock, flags);
if (response_code != NULL)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 0c712b78a7e8..dddc3de30401 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -4,8 +4,8 @@
*
* Copyright (C) IBM Corp. 1999,2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Hartmut Penner (hp@de.ibm.com),
- * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ * Hartmut Penner (hp@de.ibm.com),
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -24,29 +24,29 @@
* Stack layout for the system_call stack entry.
* The first few entries are identical to the user_regs_struct.
*/
-SP_PTREGS = STACK_FRAME_OVERHEAD
-SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
-SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
-SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
-SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
-SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
-SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
-SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
-SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
-SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
-SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
-SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+SP_PTREGS = STACK_FRAME_OVERHEAD
+SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
+SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
+SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
+SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
+SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
+SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
+SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
+SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
+SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
@@ -81,14 +81,14 @@ STACK_SIZE = 1 << STACK_SHIFT
* R15 - kernel stack pointer
*/
- .macro STORE_TIMER lc_offset
+ .macro STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
stpt \lc_offset
#endif
.endm
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- .macro UPDATE_VTIME lc_from,lc_to,lc_sum
+ .macro UPDATE_VTIME lc_from,lc_to,lc_sum
lm %r10,%r11,\lc_from
sl %r10,\lc_to
sl %r11,\lc_to+4
@@ -147,7 +147,7 @@ STACK_SIZE = 1 << STACK_SHIFT
2:
.endm
- .macro CREATE_STACK_FRAME psworg,savearea
+ .macro CREATE_STACK_FRAME psworg,savearea
s %r15,BASED(.Lc_spsize) # make room for registers & psw
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
la %r12,\psworg
@@ -160,7 +160,7 @@ STACK_SIZE = 1 << STACK_SHIFT
st %r12,__SF_BACKCHAIN(%r15) # clear back chain
.endm
- .macro RESTORE_ALL psworg,sync
+ .macro RESTORE_ALL psworg,sync
mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
.if !\sync
ni \psworg+1,0xfd # clear wait state bit
@@ -177,16 +177,16 @@ STACK_SIZE = 1 << STACK_SHIFT
* Returns:
* gpr2 = prev
*/
- .globl __switch_to
+ .globl __switch_to
__switch_to:
- basr %r1,0
+ basr %r1,0
__switch_to_base:
tm __THREAD_per(%r3),0xe8 # new process is using per ?
bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
- stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
- clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
- be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
- lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
+ stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff
+ clc __THREAD_per(12,%r3),__SF_EMPTY(%r15)
+ be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
+ lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
l %r4,__THREAD_info(%r2) # get thread_info of prev
tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
@@ -195,13 +195,13 @@ __switch_to_noper:
l %r4,__THREAD_info(%r3) # get thread_info of next
oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
- stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+ stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
st %r3,__LC_CURRENT # __LC_CURRENT = current task struct
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- l %r3,__THREAD_info(%r3) # load thread_info from task struct
+ l %r3,__THREAD_info(%r3) # load thread_info from task struct
st %r3,__LC_THREAD_INFO
ahi %r3,STACK_SIZE
st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
@@ -213,7 +213,7 @@ __critical_start:
* are executed with interrupts enabled.
*/
- .globl system_call
+ .globl system_call
system_call:
STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
@@ -233,24 +233,24 @@ sysc_update:
#endif
sysc_do_svc:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- sla %r7,2 # *4 and test for svc 0
- bnz BASED(sysc_nr_ok) # svc number > 0
+ sla %r7,2 # *4 and test for svc 0
+ bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls)
bnl BASED(sysc_nr_ok)
- lr %r7,%r1 # copy svc number to %r7
- sla %r7,2 # *4
+ lr %r7,%r1 # copy svc number to %r7
+ sla %r7,2 # *4
sysc_nr_ok:
mvc SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
l %r8,BASED(.Lsysc_table)
tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
l %r8,0(%r7,%r8) # get system call addr.
- bnz BASED(sysc_tracesys)
- basr %r14,%r8 # call sys_xxxx
- st %r2,SP_R2(%r15) # store return value (change R2 on stack)
- # ATTENTION: check sys_execve_glue before
- # changing anything here !!
+ bnz BASED(sysc_tracesys)
+ basr %r14,%r8 # call sys_xxxx
+ st %r2,SP_R2(%r15) # store return value (change R2 on stack)
+ # ATTENTION: check sys_execve_glue before
+ # changing anything here !!
sysc_return:
tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -258,14 +258,14 @@ sysc_return:
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL __LC_RETURN_PSW,1
+ RESTORE_ALL __LC_RETURN_PSW,1
#
# recheck if there is more work to do
#
sysc_work_loop:
tm __TI_flags+3(%r9),_TIF_WORK_SVC
- bz BASED(sysc_leave) # there is no work to do
+ bz BASED(sysc_leave) # there is no work to do
#
# One of the work bits is on. Find out which one.
#
@@ -284,11 +284,11 @@ sysc_work:
#
# _TIF_NEED_RESCHED is set, call schedule
-#
-sysc_reschedule:
- l %r1,BASED(.Lschedule)
- la %r14,BASED(sysc_work_loop)
- br %r1 # call scheduler
+#
+sysc_reschedule:
+ l %r1,BASED(.Lschedule)
+ la %r14,BASED(sysc_work_loop)
+ br %r1 # call scheduler
#
# _TIF_MCCK_PENDING is set, call handler
@@ -301,11 +301,11 @@ sysc_mcck_pending:
#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
-sysc_sigpending:
+sysc_sigpending:
ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Ldo_signal)
- basr %r14,%r1 # call do_signal
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Ldo_signal)
+ basr %r14,%r1 # call do_signal
tm __TI_flags+3(%r9),_TIF_RESTART_SVC
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
@@ -317,11 +317,11 @@ sysc_sigpending:
#
sysc_restart:
ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
- l %r7,SP_R2(%r15) # load new svc number
+ l %r7,SP_R2(%r15) # load new svc number
sla %r7,2
mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
- lm %r2,%r6,SP_R2(%r15) # load svc arguments
- b BASED(sysc_do_restart) # restart svc
+ lm %r2,%r6,SP_R2(%r15) # load svc arguments
+ b BASED(sysc_do_restart) # restart svc
#
# _TIF_SINGLE_STEP is set, call do_single_step
@@ -338,8 +338,8 @@ sysc_singlestep:
# call trace before and after sys_call
#
sysc_tracesys:
- l %r1,BASED(.Ltrace)
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Ltrace)
+ la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,0
srl %r7,2
st %r7,SP_R2(%r15)
@@ -347,19 +347,19 @@ sysc_tracesys:
clc SP_R2(4,%r15),BASED(.Lnr_syscalls)
bnl BASED(sysc_tracenogo)
l %r8,BASED(.Lsysc_table)
- l %r7,SP_R2(%r15) # strace might have changed the
- sll %r7,2 # system call
+ l %r7,SP_R2(%r15) # strace might have changed the
+ sll %r7,2 # system call
l %r8,0(%r7,%r8)
sysc_tracego:
lm %r3,%r6,SP_R3(%r15)
l %r2,SP_ORIG_R2(%r15)
- basr %r14,%r8 # call sys_xxx
- st %r2,SP_R2(%r15) # store return value
+ basr %r14,%r8 # call sys_xxx
+ st %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
- bz BASED(sysc_return)
+ bz BASED(sysc_return)
l %r1,BASED(.Ltrace)
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,1
la %r14,BASED(sysc_return)
br %r1
@@ -367,17 +367,17 @@ sysc_tracenogo:
#
# a new process exits the kernel with ret_from_fork
#
- .globl ret_from_fork
+ .globl ret_from_fork
ret_from_fork:
l %r13,__LC_SVC_NEW_PSW+4
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
bo BASED(0f)
st %r15,SP_R15(%r15) # store stack pointer for new kthread
-0: l %r1,BASED(.Lschedtail)
- basr %r14,%r1
+0: l %r1,BASED(.Lschedtail)
+ basr %r14,%r1
TRACE_IRQS_ON
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
b BASED(sysc_return)
#
@@ -386,52 +386,51 @@ ret_from_fork:
# but are called with different parameters.
# return-address is set up above
#
-sys_clone_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lclone)
- br %r1 # branch to sys_clone
-
-sys_fork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lfork)
- br %r1 # branch to sys_fork
-
-sys_vfork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lvfork)
- br %r1 # branch to sys_vfork
-
-sys_execve_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Lexecve)
- lr %r12,%r14 # save return address
- basr %r14,%r1 # call sys_execve
- ltr %r2,%r2 # check if execve failed
- bnz 0(%r12) # it did fail -> store result in gpr2
- b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8
- # in system_call/sysc_tracesys
-
-sys_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- l %r1,BASED(.Lsigreturn)
- br %r1 # branch to sys_sigreturn
-
-sys_rt_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- l %r1,BASED(.Lrt_sigreturn)
- br %r1 # branch to sys_sigreturn
+sys_clone_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Lclone)
+ br %r1 # branch to sys_clone
-sys_sigaltstack_glue:
- la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
- l %r1,BASED(.Lsigaltstack)
- br %r1 # branch to sys_sigreturn
+sys_fork_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Lfork)
+ br %r1 # branch to sys_fork
+sys_vfork_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Lvfork)
+ br %r1 # branch to sys_vfork
+
+sys_execve_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Lexecve)
+ lr %r12,%r14 # save return address
+ basr %r14,%r1 # call sys_execve
+ ltr %r2,%r2 # check if execve failed
+ bnz 0(%r12) # it did fail -> store result in gpr2
+ b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8
+ # in system_call/sysc_tracesys
+
+sys_sigreturn_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
+ l %r1,BASED(.Lsigreturn)
+ br %r1 # branch to sys_sigreturn
+
+sys_rt_sigreturn_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
+ l %r1,BASED(.Lrt_sigreturn)
+	br	%r1			# branch to sys_rt_sigreturn
+
+sys_sigaltstack_glue:
+ la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
+ l %r1,BASED(.Lsigaltstack)
+	br	%r1			# branch to sys_sigaltstack
/*
* Program check handler routine
*/
- .globl pgm_check_handler
+ .globl pgm_check_handler
pgm_check_handler:
/*
* First we need to check for a special case:
@@ -448,8 +447,8 @@ pgm_check_handler:
*/
STORE_TIMER __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA
- tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
- bnz BASED(pgm_per) # got per exception -> special case
+ tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+ bnz BASED(pgm_per) # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -461,29 +460,29 @@ pgm_check_handler:
pgm_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- l %r3,__LC_PGM_ILC # load program interruption code
+ l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3
pgm_do_call:
- l %r7,BASED(.Ljump_table)
- sll %r8,2
- l %r7,0(%r8,%r7) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
- la %r14,BASED(sysc_return)
- br %r7 # branch to interrupt-handler
+ l %r7,BASED(.Ljump_table)
+ sll %r8,2
+ l %r7,0(%r8,%r7) # load address of handler routine
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ la %r14,BASED(sysc_return)
+ br %r7 # branch to interrupt-handler
#
# handle per exception
#
pgm_per:
- tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
- bnz BASED(pgm_per_std) # ok, normal per event from user space
+ tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
+ bnz BASED(pgm_per_std) # ok, normal per event from user space
# ok it's one of the special cases, now we need to find out which one
- clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
- be BASED(pgm_svcper)
+ clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
+ be BASED(pgm_svcper)
# no interesting special case, ignore PER event
- lm %r12,%r15,__LC_SAVE_AREA
- lpsw 0x28
+ lm %r12,%r15,__LC_SAVE_AREA
+ lpsw 0x28
#
# Normal per exception
@@ -507,10 +506,10 @@ pgm_no_vtime2:
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
tm SP_PSW+1(%r15),0x01 # kernel per event ?
bz BASED(kernel_per)
- l %r3,__LC_PGM_ILC # load program interruption code
+ l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
- nr %r8,%r3 # clear per-event-bit and ilc
- be BASED(sysc_return) # only per or per+check ?
+ nr %r8,%r3 # clear per-event-bit and ilc
+ be BASED(sysc_return) # only per or per+check ?
b BASED(pgm_do_call)
#
@@ -552,7 +551,7 @@ kernel_per:
* IO interrupt handler routine
*/
- .globl io_int_handler
+ .globl io_int_handler
io_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
@@ -569,42 +568,42 @@ io_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
- l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
- la %r2,SP_PTREGS(%r15) # address of register-save area
- basr %r14,%r1 # branch to standard irq handler
+ l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ basr %r14,%r1 # branch to standard irq handler
TRACE_IRQS_ON
io_return:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
#ifdef CONFIG_PREEMPT
- bno BASED(io_preempt) # no -> check for preemptive scheduling
+ bno BASED(io_preempt) # no -> check for preemptive scheduling
#else
- bno BASED(io_leave) # no-> skip resched & signal
+	bno	BASED(io_leave)		# no -> skip resched & signal
#endif
tm __TI_flags+3(%r9),_TIF_WORK_INT
- bnz BASED(io_work) # there is work to do (signals etc.)
+ bnz BASED(io_work) # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL __LC_RETURN_PSW,0
+ RESTORE_ALL __LC_RETURN_PSW,0
io_done:
#ifdef CONFIG_PREEMPT
io_preempt:
icm %r0,15,__TI_precount(%r9)
- bnz BASED(io_leave)
+ bnz BASED(io_leave)
l %r1,SP_R15(%r15)
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
io_resume_loop:
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bno BASED(io_leave)
- mvc __TI_precount(4,%r9),BASED(.Lc_pactive)
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- l %r1,BASED(.Lschedule)
+ mvc __TI_precount(4,%r9),BASED(.Lc_pactive)
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ l %r1,BASED(.Lschedule)
basr %r14,%r1 # call schedule
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
- xc __TI_precount(4,%r9),__TI_precount(%r9)
+ stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ xc __TI_precount(4,%r9),__TI_precount(%r9)
b BASED(io_resume_loop)
#endif
@@ -615,16 +614,16 @@ io_work:
l %r1,__LC_KERNEL_STACK
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
-# and _TIF_MCCK_PENDING
+# and _TIF_MCCK_PENDING
#
io_work_loop:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
- bo BASED(io_mcck_pending)
+ bo BASED(io_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(io_reschedule)
tm __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
@@ -637,36 +636,36 @@ io_work_loop:
io_mcck_pending:
l %r1,BASED(.Ls390_handle_mcck)
la %r14,BASED(io_work_loop)
- br %r1 # TIF bit will be cleared by handler
+ br %r1 # TIF bit will be cleared by handler
#
# _TIF_NEED_RESCHED is set, call schedule
-#
-io_reschedule:
- l %r1,BASED(.Lschedule)
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- basr %r14,%r1 # call scheduler
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+#
+io_reschedule:
+ l %r1,BASED(.Lschedule)
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ basr %r14,%r1 # call scheduler
+ stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
tm __TI_flags+3(%r9),_TIF_WORK_INT
- bz BASED(io_leave) # there is no work to do
+ bz BASED(io_leave) # there is no work to do
b BASED(io_work_loop)
#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
-io_sigpending:
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Ldo_signal)
- basr %r14,%r1 # call do_signal
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+io_sigpending:
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ l %r1,BASED(.Ldo_signal)
+ basr %r14,%r1 # call do_signal
+ stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
b BASED(io_work_loop)
/*
* External interrupt handler routine
*/
- .globl ext_int_handler
+ .globl ext_int_handler
ext_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
@@ -683,8 +682,8 @@ ext_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
- la %r2,SP_PTREGS(%r15) # address of register-save area
- lh %r3,__LC_EXT_INT_CODE # get interruption code
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ lh %r3,__LC_EXT_INT_CODE # get interruption code
l %r1,BASED(.Ldo_extint)
basr %r14,%r1
TRACE_IRQS_ON
@@ -696,13 +695,13 @@ __critical_end:
* Machine check handler routines
*/
- .globl mcck_int_handler
+ .globl mcck_int_handler
mcck_int_handler:
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32
la %r12,__LC_MCK_OLD_PSW
- tm __LC_MCCK_CODE,0x80 # system damage?
+ tm __LC_MCCK_CODE,0x80 # system damage?
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
@@ -741,7 +740,7 @@ mcck_int_main:
l %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
+ tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(mcck_no_vtime)
@@ -752,14 +751,14 @@ mcck_no_vtime:
#endif
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
- l %r1,BASED(.Ls390_mcck)
- basr %r14,%r1 # call machine check handler
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ l %r1,BASED(.Ls390_mcck)
+ basr %r14,%r1 # call machine check handler
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
bno BASED(mcck_return)
- l %r1,__LC_KERNEL_STACK # switch to kernel stack
+ l %r1,__LC_KERNEL_STACK # switch to kernel stack
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
stosm __SF_EMPTY(%r15),0x04 # turn dat on
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
@@ -783,36 +782,36 @@ mcck_return:
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
lpsw __LC_RETURN_MCCK_PSW # back to caller
- RESTORE_ALL __LC_RETURN_MCCK_PSW,0
+ RESTORE_ALL __LC_RETURN_MCCK_PSW,0
#ifdef CONFIG_SMP
/*
* Restart interruption handler, kick starter for additional CPUs
*/
- .globl restart_int_handler
+ .globl restart_int_handler
restart_int_handler:
- l %r15,__LC_SAVE_AREA+60 # load ksp
- lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
- lam %a0,%a15,__LC_AREGS_SAVE_AREA
- lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
- stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
- basr %r14,0
- l %r14,restart_addr-.(%r14)
- br %r14 # branch to start_secondary
+ l %r15,__LC_SAVE_AREA+60 # load ksp
+ lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
+ lam %a0,%a15,__LC_AREGS_SAVE_AREA
+ lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+ stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
+ basr %r14,0
+ l %r14,restart_addr-.(%r14)
+ br %r14 # branch to start_secondary
restart_addr:
- .long start_secondary
+ .long start_secondary
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
- .globl restart_int_handler
+ .globl restart_int_handler
restart_int_handler:
- basr %r1,0
+ basr %r1,0
restart_base:
- lpsw restart_crash-restart_base(%r1)
- .align 8
+ lpsw restart_crash-restart_base(%r1)
+ .align 8
restart_crash:
- .long 0x000a0000,0x00000000
+ .long 0x000a0000,0x00000000
restart_go:
#endif
@@ -834,11 +833,11 @@ stack_overflow:
be BASED(0f)
la %r1,__LC_SAVE_AREA+16
0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
l %r1,BASED(1f) # branch to kernel_stack_overflow
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ la %r2,SP_PTREGS(%r15) # load pt_regs
br %r1
-1: .long kernel_stack_overflow
+1: .long kernel_stack_overflow
#endif
cleanup_table_system_call:
@@ -940,10 +939,10 @@ cleanup_novtime:
cleanup_system_call_insn:
.long sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- .long system_call + 0x80000000
- .long sysc_vtime + 0x80000000
- .long sysc_stime + 0x80000000
- .long sysc_update + 0x80000000
+ .long system_call + 0x80000000
+ .long sysc_vtime + 0x80000000
+ .long sysc_stime + 0x80000000
+ .long sysc_update + 0x80000000
#endif
cleanup_sysc_return:
@@ -1009,57 +1008,57 @@ cleanup_io_leave_insn:
/*
* Integer constants
*/
- .align 4
-.Lc_spsize: .long SP_SIZE
-.Lc_overhead: .long STACK_FRAME_OVERHEAD
-.Lc_pactive: .long PREEMPT_ACTIVE
-.Lnr_syscalls: .long NR_syscalls
-.L0x018: .short 0x018
-.L0x020: .short 0x020
-.L0x028: .short 0x028
-.L0x030: .short 0x030
-.L0x038: .short 0x038
-.Lc_1: .long 1
+ .align 4
+.Lc_spsize: .long SP_SIZE
+.Lc_overhead: .long STACK_FRAME_OVERHEAD
+.Lc_pactive: .long PREEMPT_ACTIVE
+.Lnr_syscalls: .long NR_syscalls
+.L0x018: .short 0x018
+.L0x020: .short 0x020
+.L0x028: .short 0x028
+.L0x030: .short 0x030
+.L0x038: .short 0x038
+.Lc_1: .long 1
/*
* Symbol constants
*/
-.Ls390_mcck: .long s390_do_machine_check
+.Ls390_mcck: .long s390_do_machine_check
.Ls390_handle_mcck:
- .long s390_handle_mcck
-.Lmck_old_psw: .long __LC_MCK_OLD_PSW
-.Ldo_IRQ: .long do_IRQ
-.Ldo_extint: .long do_extint
-.Ldo_signal: .long do_signal
-.Lhandle_per: .long do_single_step
-.Ljump_table: .long pgm_check_table
-.Lschedule: .long schedule
-.Lclone: .long sys_clone
-.Lexecve: .long sys_execve
-.Lfork: .long sys_fork
-.Lrt_sigreturn:.long sys_rt_sigreturn
+ .long s390_handle_mcck
+.Lmck_old_psw: .long __LC_MCK_OLD_PSW
+.Ldo_IRQ: .long do_IRQ
+.Ldo_extint: .long do_extint
+.Ldo_signal: .long do_signal
+.Lhandle_per: .long do_single_step
+.Ljump_table: .long pgm_check_table
+.Lschedule: .long schedule
+.Lclone: .long sys_clone
+.Lexecve: .long sys_execve
+.Lfork: .long sys_fork
+.Lrt_sigreturn: .long sys_rt_sigreturn
.Lrt_sigsuspend:
- .long sys_rt_sigsuspend
-.Lsigreturn: .long sys_sigreturn
-.Lsigsuspend: .long sys_sigsuspend
-.Lsigaltstack: .long sys_sigaltstack
-.Ltrace: .long syscall_trace
-.Lvfork: .long sys_vfork
-.Lschedtail: .long schedule_tail
-.Lsysc_table: .long sys_call_table
+ .long sys_rt_sigsuspend
+.Lsigreturn: .long sys_sigreturn
+.Lsigsuspend: .long sys_sigsuspend
+.Lsigaltstack: .long sys_sigaltstack
+.Ltrace: .long syscall_trace
+.Lvfork: .long sys_vfork
+.Lschedtail: .long schedule_tail
+.Lsysc_table: .long sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on:.long trace_hardirqs_on
+.Ltrace_irq_on: .long trace_hardirqs_on
.Ltrace_irq_off:
- .long trace_hardirqs_off
+ .long trace_hardirqs_off
#endif
.Lcritical_start:
- .long __critical_start + 0x80000000
+ .long __critical_start + 0x80000000
.Lcritical_end:
- .long __critical_end + 0x80000000
+ .long __critical_end + 0x80000000
.Lcleanup_critical:
- .long cleanup_critical
+ .long cleanup_critical
- .section .rodata, "a"
+ .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa
sys_call_table:
#include "syscalls.S"
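
The sys_call_table built here comes from a single shared list: syscalls.S holds one SYSCALL(esa,esame,emu) line per call, and each consumer redefines the macro to emit the column it wants (.long esa above; entry64.S selects esame, and its compat table selects emu). A C sketch of the same one-list, many-tables idiom, with hypothetical names:

typedef long (*syscall_fn)(void);

/* Hypothetical stand-ins for the 31-bit, 64-bit and compat entries. */
static long my_read_31(void)  { return 31; }
static long my_read_64(void)  { return 64; }
static long my_read_emu(void) { return 32; }

/* The shared list; in the kernel it is the included syscalls.S. */
#define SYSCALL_LIST \
	SYSCALL(my_read_31, my_read_64, my_read_emu)

#define SYSCALL(esa, esame, emu) (esa),		/* 31-bit: column one */
static syscall_fn table_31[] = { SYSCALL_LIST };
#undef SYSCALL

#define SYSCALL(esa, esame, emu) (esame),	/* 64-bit: column two */
static syscall_fn table_64[] = { SYSCALL_LIST };
#undef SYSCALL
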
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 29bbfbab7332..0f758c329a5d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -4,8 +4,8 @@
*
* Copyright (C) IBM Corp. 1999,2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Hartmut Penner (hp@de.ibm.com),
- * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ * Hartmut Penner (hp@de.ibm.com),
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -24,29 +24,29 @@
* Stack layout for the system_call stack entry.
* The first few entries are identical to the user_regs_struct.
*/
-SP_PTREGS = STACK_FRAME_OVERHEAD
-SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
-SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
-SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
-SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
-SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
-SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
-SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
-SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
-SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
-SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
-SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+SP_PTREGS = STACK_FRAME_OVERHEAD
+SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
+SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
+SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
+SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
+SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
+SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
+SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
+SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
+SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
+SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
+SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
+SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
+SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
+SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
+SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
+SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
+SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
+SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
+SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
+SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
+SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
@@ -71,14 +71,14 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
#define TRACE_IRQS_OFF
#endif
- .macro STORE_TIMER lc_offset
+ .macro STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
stpt \lc_offset
#endif
.endm
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- .macro UPDATE_VTIME lc_from,lc_to,lc_sum
+ .macro UPDATE_VTIME lc_from,lc_to,lc_sum
lg %r10,\lc_from
slg %r10,\lc_to
alg %r10,\lc_sum
@@ -94,7 +94,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE savearea
+ .macro SAVE_ALL_BASE savearea
stmg %r12,%r15,\savearea
larl %r13,system_call
.endm
@@ -139,8 +139,8 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
.endm
.macro CREATE_STACK_FRAME psworg,savearea
- aghi %r15,-SP_SIZE # make room for registers & psw
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+ aghi %r15,-SP_SIZE # make room for registers & psw
+ mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
la %r12,\psworg
stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
icm %r12,12,__LC_SVC_ILC
@@ -149,7 +149,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
la %r12,0
stg %r12,__SF_BACKCHAIN(%r15)
- .endm
+ .endm
.macro RESTORE_ALL psworg,sync
mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
@@ -168,29 +168,29 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
* Returns:
* gpr2 = prev
*/
- .globl __switch_to
+ .globl __switch_to
__switch_to:
tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
jz __switch_to_noper # if not we're fine
- stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
- clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
- je __switch_to_noper # we got away without bashing TLB's
- lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
+ stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
+ clc __THREAD_per(24,%r3),__SF_EMPTY(%r15)
+ je __switch_to_noper # we got away without bashing TLB's
+ lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
__switch_to_noper:
- lg %r4,__THREAD_info(%r2) # get thread_info of prev
+ lg %r4,__THREAD_info(%r2) # get thread_info of prev
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
jz __switch_to_no_mcck
ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
lg %r4,__THREAD_info(%r3) # get thread_info of next
oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
- stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
+ stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
- lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
+ lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- lg %r3,__THREAD_info(%r3) # load thread_info from task struct
+ lg %r3,__THREAD_info(%r3) # load thread_info from task struct
stg %r3,__LC_THREAD_INFO
aghi %r3,STACK_SIZE
stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack
@@ -202,14 +202,14 @@ __critical_start:
* are executed with interrupts enabled.
*/
- .globl system_call
+ .globl system_call
system_call:
STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+ CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
+ llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
@@ -222,45 +222,45 @@ sysc_update:
#endif
sysc_do_svc:
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- slag %r7,%r7,2 # *4 and test for svc 0
+ slag %r7,%r7,2 # *4 and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
cl %r1,BASED(.Lnr_syscalls)
jnl sysc_nr_ok
- lgfr %r7,%r1 # clear high word in r1
- slag %r7,%r7,2 # svc 0: system call number in %r1
+ lgfr %r7,%r1 # clear high word in r1
+ slag %r7,%r7,2 # svc 0: system call number in %r1
sysc_nr_ok:
mvc SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
- larl %r10,sys_call_table
+ larl %r10,sys_call_table
#ifdef CONFIG_COMPAT
tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
jno sysc_noemu
- larl %r10,sys_call_table_emu # use 31 bit emulation system calls
+ larl %r10,sys_call_table_emu # use 31 bit emulation system calls
sysc_noemu:
#endif
tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
- lgf %r8,0(%r7,%r10) # load address of system call routine
- jnz sysc_tracesys
- basr %r14,%r8 # call sys_xxxx
- stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
- # ATTENTION: check sys_execve_glue before
- # changing anything here !!
+ lgf %r8,0(%r7,%r10) # load address of system call routine
+ jnz sysc_tracesys
+ basr %r14,%r8 # call sys_xxxx
+ stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
+ # ATTENTION: check sys_execve_glue before
+ # changing anything here !!
sysc_return:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
- jno sysc_leave
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
+ jno sysc_leave
tm __TI_flags+7(%r9),_TIF_WORK_SVC
- jnz sysc_work # there is work to do (signals etc.)
+ jnz sysc_work # there is work to do (signals etc.)
sysc_leave:
- RESTORE_ALL __LC_RETURN_PSW,1
+ RESTORE_ALL __LC_RETURN_PSW,1
#
# recheck if there is more work to do
#
sysc_work_loop:
tm __TI_flags+7(%r9),_TIF_WORK_SVC
- jz sysc_leave # there is no work to do
+ jz sysc_leave # there is no work to do
#
# One of the work bits is on. Find out which one.
#
@@ -279,25 +279,25 @@ sysc_work:
#
# _TIF_NEED_RESCHED is set, call schedule
-#
-sysc_reschedule:
- larl %r14,sysc_work_loop
- jg schedule # return point is sysc_return
+#
+sysc_reschedule:
+ larl %r14,sysc_work_loop
+ jg schedule # return point is sysc_return
#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
larl %r14,sysc_work_loop
- jg s390_handle_mcck # TIF bit will be cleared by handler
+ jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
-sysc_sigpending:
+sysc_sigpending:
ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
- la %r2,SP_PTREGS(%r15) # load pt_regs
- brasl %r14,do_signal # call do_signal
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ brasl %r14,do_signal # call do_signal
tm __TI_flags+7(%r9),_TIF_RESTART_SVC
jo sysc_restart
tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
@@ -309,11 +309,11 @@ sysc_sigpending:
#
sysc_restart:
ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
- lg %r7,SP_R2(%r15) # load new svc number
- slag %r7,%r7,2 # *4
+ lg %r7,SP_R2(%r15) # load new svc number
+ slag %r7,%r7,2 # *4
mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
- j sysc_do_restart # restart svc
+ lmg %r2,%r6,SP_R2(%r15) # load svc arguments
+ j sysc_do_restart # restart svc
#
# _TIF_SINGLE_STEP is set, call do_single_step
@@ -326,49 +326,48 @@ sysc_singlestep:
larl %r14,sysc_return # load adr. of system return
jg do_single_step # branch to do_sigtrap
-
#
# call syscall_trace before and after system call
# special linkage: %r12 contains the return address for trace_svc
#
sysc_tracesys:
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,0
srl %r7,2
- stg %r7,SP_R2(%r15)
- brasl %r14,syscall_trace
+ stg %r7,SP_R2(%r15)
+ brasl %r14,syscall_trace
lghi %r0,NR_syscalls
clg %r0,SP_R2(%r15)
jnh sysc_tracenogo
- lg %r7,SP_R2(%r15) # strace might have changed the
- sll %r7,2 # system call
+ lg %r7,SP_R2(%r15) # strace might have changed the
+ sll %r7,2 # system call
lgf %r8,0(%r7,%r10)
sysc_tracego:
- lmg %r3,%r6,SP_R3(%r15)
- lg %r2,SP_ORIG_R2(%r15)
- basr %r14,%r8 # call sys_xxx
- stg %r2,SP_R2(%r15) # store return value
+ lmg %r3,%r6,SP_R3(%r15)
+ lg %r2,SP_ORIG_R2(%r15)
+ basr %r14,%r8 # call sys_xxx
+ stg %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
- jz sysc_return
- la %r2,SP_PTREGS(%r15) # load pt_regs
+ jz sysc_return
+ la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,1
- larl %r14,sysc_return # return point is sysc_return
+ larl %r14,sysc_return # return point is sysc_return
jg syscall_trace
#
# a new process exits the kernel with ret_from_fork
#
- .globl ret_from_fork
+ .globl ret_from_fork
ret_from_fork:
lg %r13,__LC_SVC_NEW_PSW+8
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
jo 0f
stg %r15,SP_R15(%r15) # store stack pointer for new kthread
-0: brasl %r14,schedule_tail
+0: brasl %r14,schedule_tail
TRACE_IRQS_ON
- stosm 24(%r15),0x03 # reenable interrupts
+ stosm 24(%r15),0x03 # reenable interrupts
j sysc_return
#
@@ -377,78 +376,78 @@ ret_from_fork:
# but are called with different parameters.
# return-address is set up above
#
-sys_clone_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys_clone # branch to sys_clone
+sys_clone_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ jg sys_clone # branch to sys_clone
#ifdef CONFIG_COMPAT
-sys32_clone_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys32_clone # branch to sys32_clone
+sys32_clone_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ jg sys32_clone # branch to sys32_clone
#endif
-sys_fork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys_fork # branch to sys_fork
-
-sys_vfork_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- jg sys_vfork # branch to sys_vfork
-
-sys_execve_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- lgr %r12,%r14 # save return address
- brasl %r14,sys_execve # call sys_execve
- ltgr %r2,%r2 # check if execve failed
- bnz 0(%r12) # it did fail -> store result in gpr2
- b 6(%r12) # SKIP STG 2,SP_R2(15) in
- # system_call/sysc_tracesys
+sys_fork_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ jg sys_fork # branch to sys_fork
+
+sys_vfork_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ jg sys_vfork # branch to sys_vfork
+
+sys_execve_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ lgr %r12,%r14 # save return address
+ brasl %r14,sys_execve # call sys_execve
+ ltgr %r2,%r2 # check if execve failed
+ bnz 0(%r12) # it did fail -> store result in gpr2
+ b 6(%r12) # SKIP STG 2,SP_R2(15) in
+ # system_call/sysc_tracesys
#ifdef CONFIG_COMPAT
-sys32_execve_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs
- lgr %r12,%r14 # save return address
- brasl %r14,sys32_execve # call sys32_execve
- ltgr %r2,%r2 # check if execve failed
- bnz 0(%r12) # it did fail -> store result in gpr2
- b 6(%r12) # SKIP STG 2,SP_R2(15) in
- # system_call/sysc_tracesys
+sys32_execve_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs
+ lgr %r12,%r14 # save return address
+ brasl %r14,sys32_execve # call sys32_execve
+ ltgr %r2,%r2 # check if execve failed
+ bnz 0(%r12) # it did fail -> store result in gpr2
+ b 6(%r12) # SKIP STG 2,SP_R2(15) in
+ # system_call/sysc_tracesys
#endif
-sys_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys_sigreturn # branch to sys_sigreturn
+sys_sigreturn_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
+ jg sys_sigreturn # branch to sys_sigreturn
#ifdef CONFIG_COMPAT
-sys32_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys32_sigreturn # branch to sys32_sigreturn
+sys32_sigreturn_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
+ jg sys32_sigreturn # branch to sys32_sigreturn
#endif
-sys_rt_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys_rt_sigreturn # branch to sys_sigreturn
+sys_rt_sigreturn_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
+	jg	sys_rt_sigreturn	# branch to sys_rt_sigreturn
#ifdef CONFIG_COMPAT
-sys32_rt_sigreturn_glue:
- la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys32_rt_sigreturn # branch to sys32_sigreturn
+sys32_rt_sigreturn_glue:
+ la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
+	jg	sys32_rt_sigreturn	# branch to sys32_rt_sigreturn
#endif
sys_sigaltstack_glue:
- la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys_sigaltstack # branch to sys_sigreturn
+ la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
+	jg	sys_sigaltstack		# branch to sys_sigaltstack
#ifdef CONFIG_COMPAT
sys32_sigaltstack_glue:
- la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
- jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
+ la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
+	jg	sys32_sigaltstack_wrapper # branch to sys32_sigaltstack_wrapper
#endif
/*
* Program check handler routine
*/
- .globl pgm_check_handler
+ .globl pgm_check_handler
pgm_check_handler:
/*
* First we need to check for a special case:
@@ -465,8 +464,8 @@ pgm_check_handler:
*/
STORE_TIMER __LC_SYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA
- tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
- jnz pgm_per # got per exception -> special case
+ tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
+ jnz pgm_per # got per exception -> special case
SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -478,29 +477,29 @@ pgm_check_handler:
pgm_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- lgf %r3,__LC_PGM_ILC # load program interruption code
+ lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3
pgm_do_call:
- sll %r8,3
- larl %r1,pgm_check_table
- lg %r1,0(%r8,%r1) # load address of handler routine
- la %r2,SP_PTREGS(%r15) # address of register-save area
+ sll %r8,3
+ larl %r1,pgm_check_table
+ lg %r1,0(%r8,%r1) # load address of handler routine
+ la %r2,SP_PTREGS(%r15) # address of register-save area
larl %r14,sysc_return
- br %r1 # branch to interrupt-handler
+ br %r1 # branch to interrupt-handler
#
# handle per exception
#
pgm_per:
- tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
- jnz pgm_per_std # ok, normal per event from user space
+ tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
+ jnz pgm_per_std # ok, normal per event from user space
# ok it's one of the special cases, now we need to find out which one
- clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
- je pgm_svcper
+ clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
+ je pgm_svcper
# no interesting special case, ignore PER event
lmg %r12,%r15,__LC_SAVE_AREA
- lpswe __LC_PGM_OLD_PSW
+ lpswe __LC_PGM_OLD_PSW
#
# Normal per exception
@@ -524,9 +523,9 @@ pgm_no_vtime2:
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
- lgf %r3,__LC_PGM_ILC # load program interruption code
+ lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
- ngr %r8,%r3 # clear per-event-bit and ilc
+ ngr %r8,%r3 # clear per-event-bit and ilc
je sysc_return
j pgm_do_call
@@ -544,7 +543,7 @@ pgm_svcper:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
- llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+ llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
@@ -568,7 +567,7 @@ kernel_per:
/*
* IO interrupt handler routine
*/
- .globl io_int_handler
+ .globl io_int_handler
io_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
@@ -585,42 +584,42 @@ io_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
- la %r2,SP_PTREGS(%r15) # address of register-save area
- brasl %r14,do_IRQ # call standard irq handler
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ brasl %r14,do_IRQ # call standard irq handler
TRACE_IRQS_ON
io_return:
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
#ifdef CONFIG_PREEMPT
- jno io_preempt # no -> check for preemptive scheduling
+ jno io_preempt # no -> check for preemptive scheduling
#else
- jno io_leave # no-> skip resched & signal
+	jno	io_leave		# no -> skip resched & signal
#endif
tm __TI_flags+7(%r9),_TIF_WORK_INT
- jnz io_work # there is work to do (signals etc.)
+ jnz io_work # there is work to do (signals etc.)
io_leave:
- RESTORE_ALL __LC_RETURN_PSW,0
+ RESTORE_ALL __LC_RETURN_PSW,0
io_done:
#ifdef CONFIG_PREEMPT
io_preempt:
- icm %r0,15,__TI_precount(%r9)
- jnz io_leave
+ icm %r0,15,__TI_precount(%r9)
+ jnz io_leave
# switch to kernel stack
lg %r1,SP_R15(%r15)
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
io_resume_loop:
tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
jno io_leave
- larl %r1,.Lc_pactive
- mvc __TI_precount(4,%r9),0(%r1)
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- brasl %r14,schedule # call schedule
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
- xc __TI_precount(4,%r9),__TI_precount(%r9)
+ larl %r1,.Lc_pactive
+ mvc __TI_precount(4,%r9),0(%r1)
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ brasl %r14,schedule # call schedule
+ stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ xc __TI_precount(4,%r9),__TI_precount(%r9)
j io_resume_loop
#endif
@@ -631,7 +630,7 @@ io_work:
lg %r1,__LC_KERNEL_STACK
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
#
# One of the work bits is on. Find out which one.
@@ -656,11 +655,11 @@ io_mcck_pending:
#
# _TIF_NEED_RESCHED is set, call schedule
-#
-io_reschedule:
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- brasl %r14,schedule # call scheduler
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+#
+io_reschedule:
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ brasl %r14,schedule # call scheduler
+ stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
tm __TI_flags+7(%r9),_TIF_WORK_INT
jz io_leave # there is no work to do
j io_work_loop
@@ -668,17 +667,17 @@ io_reschedule:
#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
-io_sigpending:
- stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
- la %r2,SP_PTREGS(%r15) # load pt_regs
+io_sigpending:
+ stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
- stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
+ stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
j io_work_loop
/*
* External interrupt handler routine
*/
- .globl ext_int_handler
+ .globl ext_int_handler
ext_int_handler:
STORE_TIMER __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
@@ -695,9 +694,9 @@ ext_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
- la %r2,SP_PTREGS(%r15) # address of register-save area
- llgh %r3,__LC_EXT_INT_CODE # get interruption code
- brasl %r14,do_extint
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ llgh %r3,__LC_EXT_INT_CODE # get interruption code
+ brasl %r14,do_extint
TRACE_IRQS_ON
j io_return
@@ -706,14 +705,14 @@ __critical_end:
/*
* Machine check handler routines
*/
- .globl mcck_int_handler
+ .globl mcck_int_handler
mcck_int_handler:
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+64
la %r12,__LC_MCK_OLD_PSW
- tm __LC_MCCK_CODE,0x80 # system damage?
+ tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
la %r14,4095
@@ -737,19 +736,19 @@ mcck_int_handler:
#endif
tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical
- tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
+ tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
jnz mcck_int_main # from user -> load kernel stack
clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
jhe mcck_int_main
- clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
+ clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
jl mcck_int_main
- brasl %r14,cleanup_critical
+ brasl %r14,cleanup_critical
mcck_int_main:
- lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
+ lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
slgr %r14,%r15
srag %r14,%r14,PAGE_SHIFT
jz 0f
- lg %r15,__LC_PANIC_STACK # load panic stack
+ lg %r15,__LC_PANIC_STACK # load panic stack
0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
@@ -764,7 +763,7 @@ mcck_no_vtime:
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,s390_do_machine_check
- tm SP_PSW+1(%r15),0x01 # returning to user ?
+ tm SP_PSW+1(%r15),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
aghi %r1,-SP_SIZE
@@ -794,28 +793,28 @@ mcck_return:
/*
* Restart interruption handler, kick starter for additional CPUs
*/
- .globl restart_int_handler
+ .globl restart_int_handler
restart_int_handler:
- lg %r15,__LC_SAVE_AREA+120 # load ksp
- lghi %r10,__LC_CREGS_SAVE_AREA
- lctlg %c0,%c15,0(%r10) # get new ctl regs
- lghi %r10,__LC_AREGS_SAVE_AREA
- lam %a0,%a15,0(%r10)
- lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
- stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
- jg start_secondary
+ lg %r15,__LC_SAVE_AREA+120 # load ksp
+ lghi %r10,__LC_CREGS_SAVE_AREA
+ lctlg %c0,%c15,0(%r10) # get new ctl regs
+ lghi %r10,__LC_AREGS_SAVE_AREA
+ lam %a0,%a15,0(%r10)
+ lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
+ stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
+ jg start_secondary
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
- .globl restart_int_handler
+ .globl restart_int_handler
restart_int_handler:
- basr %r1,0
+ basr %r1,0
restart_base:
- lpswe restart_crash-restart_base(%r1)
- .align 8
+ lpswe restart_crash-restart_base(%r1)
+ .align 8
restart_crash:
- .long 0x000a0000,0x00000000,0x00000000,0x00000000
+ .long 0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif
@@ -836,9 +835,9 @@ stack_overflow:
chi %r12,__LC_PGM_OLD_PSW
je 0f
la %r1,__LC_SAVE_AREA+32
-0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
- la %r2,SP_PTREGS(%r15) # load pt_regs
+0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
+ la %r2,SP_PTREGS(%r15) # load pt_regs
jg kernel_stack_overflow
#endif
@@ -941,10 +940,10 @@ cleanup_novtime:
cleanup_system_call_insn:
.quad sysc_saveall
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- .quad system_call
- .quad sysc_vtime
- .quad sysc_stime
- .quad sysc_update
+ .quad system_call
+ .quad sysc_vtime
+ .quad sysc_stime
+ .quad sysc_update
#endif
cleanup_sysc_return:
@@ -1010,21 +1009,21 @@ cleanup_io_leave_insn:
/*
* Integer constants
*/
- .align 4
+ .align 4
.Lconst:
-.Lc_pactive: .long PREEMPT_ACTIVE
-.Lnr_syscalls: .long NR_syscalls
-.L0x0130: .short 0x130
-.L0x0140: .short 0x140
-.L0x0150: .short 0x150
-.L0x0160: .short 0x160
-.L0x0170: .short 0x170
+.Lc_pactive: .long PREEMPT_ACTIVE
+.Lnr_syscalls: .long NR_syscalls
+.L0x0130: .short 0x130
+.L0x0140: .short 0x140
+.L0x0150: .short 0x150
+.L0x0160: .short 0x160
+.L0x0170: .short 0x170
.Lcritical_start:
- .quad __critical_start
+ .quad __critical_start
.Lcritical_end:
- .quad __critical_end
+ .quad __critical_end
- .section .rodata, "a"
+ .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
sys_call_table:
#include "syscalls.S"
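
A detail worth noting in the 64-bit dispatch above: sys_call_table keeps 4-byte entries even here (loaded sign-extended with lgf and scaled with slag %r7,%r7,2), while pgm_check_table holds 8-byte .quad entries and is scaled with sll %r8,3. A C sketch of the bounds-check-and-index pattern both paths implement by hand; the names are illustrative:

typedef long (*handler_fn)(void);

/*
 * Sketch of the dispatch the entry code writes out in assembler:
 * bounds-check the number (the "cl %r1,.Lnr_syscalls" test), then
 * index the table and branch.  The assembly multiplies by the entry
 * size itself: shift by 2 for 4-byte slots, by 3 for 8-byte slots;
 * in C the scaling is implicit in the subscript.
 */
static long dispatch(handler_fn *table, unsigned int nr,
		     unsigned int nr_entries)
{
	if (nr >= nr_entries)
		return -1;		/* invalid number: no table entry */
	return table[nr]();		/* indirect call, as "basr %r14,%r8" */
}
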
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 0f1db268a8a9..0cf59bb7a857 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -36,449 +36,449 @@
#endif
#ifndef CONFIG_IPL
- .org 0
- .long 0x00080000,0x80000000+startup # Just a restart PSW
+ .org 0
+ .long 0x00080000,0x80000000+startup # Just a restart PSW
#else
#ifdef CONFIG_IPL_TAPE
#define IPL_BS 1024
- .org 0
- .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
- .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
- .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
- .long 0x00000000,0x00000000 # external old psw
- .long 0x00000000,0x00000000 # svc old psw
- .long 0x00000000,0x00000000 # program check old psw
- .long 0x00000000,0x00000000 # machine check old psw
- .long 0x00000000,0x00000000 # io old psw
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x000a0000,0x00000058 # external new psw
- .long 0x000a0000,0x00000060 # svc new psw
- .long 0x000a0000,0x00000068 # program check new psw
- .long 0x000a0000,0x00000070 # machine check new psw
- .long 0x00080000,0x80000000+.Lioint # io new psw
+ .org 0
+ .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
+ .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
+ .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
+ .long 0x00000000,0x00000000 # external old psw
+ .long 0x00000000,0x00000000 # svc old psw
+ .long 0x00000000,0x00000000 # program check old psw
+ .long 0x00000000,0x00000000 # machine check old psw
+ .long 0x00000000,0x00000000 # io old psw
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x000a0000,0x00000058 # external new psw
+ .long 0x000a0000,0x00000060 # svc new psw
+ .long 0x000a0000,0x00000068 # program check new psw
+ .long 0x000a0000,0x00000070 # machine check new psw
+ .long 0x00080000,0x80000000+.Lioint # io new psw
- .org 0x100
+ .org 0x100
#
# subroutine for loading from tape
-# Paramters:
+# Parameters:
# R1 = device number
# R2 = load address
-.Lloader:
- st %r14,.Lldret
- la %r3,.Lorbread # r3 = address of orb
- la %r5,.Lirb # r5 = address of irb
- st %r2,.Lccwread+4 # initialize CCW data addresses
- lctl %c6,%c6,.Lcr6
- slr %r2,%r2
+.Lloader:
+ st %r14,.Lldret
+ la %r3,.Lorbread # r3 = address of orb
+ la %r5,.Lirb # r5 = address of irb
+ st %r2,.Lccwread+4 # initialize CCW data addresses
+ lctl %c6,%c6,.Lcr6
+ slr %r2,%r2
.Lldlp:
- la %r6,3 # 3 retries
+ la %r6,3 # 3 retries
.Lssch:
- ssch 0(%r3) # load chunk of IPL_BS bytes
- bnz .Llderr
+ ssch 0(%r3) # load chunk of IPL_BS bytes
+ bnz .Llderr
.Lw4end:
- bas %r14,.Lwait4io
- tm 8(%r5),0x82 # do we have a problem ?
- bnz .Lrecov
- slr %r7,%r7
- icm %r7,3,10(%r5) # get residual count
- lcr %r7,%r7
- la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
- ar %r2,%r7 # add to total size
- tm 8(%r5),0x01 # found a tape mark ?
- bnz .Ldone
- l %r0,.Lccwread+4 # update CCW data addresses
- ar %r0,%r7
- st %r0,.Lccwread+4
- b .Lldlp
+ bas %r14,.Lwait4io
+ tm 8(%r5),0x82 # do we have a problem ?
+ bnz .Lrecov
+ slr %r7,%r7
+ icm %r7,3,10(%r5) # get residual count
+ lcr %r7,%r7
+ la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
+ ar %r2,%r7 # add to total size
+ tm 8(%r5),0x01 # found a tape mark ?
+ bnz .Ldone
+ l %r0,.Lccwread+4 # update CCW data addresses
+ ar %r0,%r7
+ st %r0,.Lccwread+4
+ b .Lldlp
.Ldone:
- l %r14,.Lldret
- br %r14 # r2 contains the total size
+ l %r14,.Lldret
+ br %r14 # r2 contains the total size
.Lrecov:
- bas %r14,.Lsense # do the sensing
- bct %r6,.Lssch # dec. retry count & branch
- b .Llderr
+ bas %r14,.Lsense # do the sensing
+ bct %r6,.Lssch # dec. retry count & branch
+ b .Llderr
#
# Sense subroutine
#
.Lsense:
- st %r14,.Lsnsret
- la %r7,.Lorbsense
- ssch 0(%r7) # start sense command
- bnz .Llderr
- bas %r14,.Lwait4io
- l %r14,.Lsnsret
- tm 8(%r5),0x82 # do we have a problem ?
- bnz .Llderr
- br %r14
+ st %r14,.Lsnsret
+ la %r7,.Lorbsense
+ ssch 0(%r7) # start sense command
+ bnz .Llderr
+ bas %r14,.Lwait4io
+ l %r14,.Lsnsret
+ tm 8(%r5),0x82 # do we have a problem ?
+ bnz .Llderr
+ br %r14
#
# Wait for interrupt subroutine
#
.Lwait4io:
- lpsw .Lwaitpsw
+ lpsw .Lwaitpsw
.Lioint:
- c %r1,0xb8 # compare subchannel number
- bne .Lwait4io
- tsch 0(%r5)
- slr %r0,%r0
- tm 8(%r5),0x82 # do we have a problem ?
- bnz .Lwtexit
- tm 8(%r5),0x04 # got device end ?
- bz .Lwait4io
+ c %r1,0xb8 # compare subchannel number
+ bne .Lwait4io
+ tsch 0(%r5)
+ slr %r0,%r0
+ tm 8(%r5),0x82 # do we have a problem ?
+ bnz .Lwtexit
+ tm 8(%r5),0x04 # got device end ?
+ bz .Lwait4io
.Lwtexit:
- br %r14
+ br %r14
.Llderr:
- lpsw .Lcrash
+ lpsw .Lcrash
- .align 8
+ .align 8
.Lorbread:
- .long 0x00000000,0x0080ff00,.Lccwread
- .align 8
+ .long 0x00000000,0x0080ff00,.Lccwread
+ .align 8
.Lorbsense:
- .long 0x00000000,0x0080ff00,.Lccwsense
- .align 8
+ .long 0x00000000,0x0080ff00,.Lccwsense
+ .align 8
.Lccwread:
- .long 0x02200000+IPL_BS,0x00000000
+ .long 0x02200000+IPL_BS,0x00000000
.Lccwsense:
- .long 0x04200001,0x00000000
+ .long 0x04200001,0x00000000
.Lwaitpsw:
- .long 0x020a0000,0x80000000+.Lioint
+ .long 0x020a0000,0x80000000+.Lioint
-.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6: .long 0xff000000
- .align 8
-.Lcrash:.long 0x000a0000,0x00000000
-.Lldret:.long 0
+.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6: .long 0xff000000
+ .align 8
+.Lcrash:.long 0x000a0000,0x00000000
+.Lldret:.long 0
.Lsnsret: .long 0
-#endif /* CONFIG_IPL_TAPE */
+#endif /* CONFIG_IPL_TAPE */
#ifdef CONFIG_IPL_VM
-#define IPL_BS 0x730
- .org 0
- .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
- .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
- .long 0x02000068,0x60000050 # (a PSW and two CCWs).
- .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
- .long 0x020000f0,0x60000050 # The next 160 byte are loaded
- .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
- .long 0x02000190,0x60000050 # They form the continuation
- .long 0x020001e0,0x60000050 # of the CCW program started
- .long 0x02000230,0x60000050 # by ipl and load the range
- .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
- .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
- .long 0x02000320,0x60000050 # in memory. At the end of
- .long 0x02000370,0x60000050 # the channel program the PSW
- .long 0x020003c0,0x60000050 # at location 0 is loaded.
- .long 0x02000410,0x60000050 # Initial processing starts
- .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
- .long 0x020004b0,0x60000050
- .long 0x02000500,0x60000050
- .long 0x02000550,0x60000050
- .long 0x020005a0,0x60000050
- .long 0x020005f0,0x60000050
- .long 0x02000640,0x60000050
- .long 0x02000690,0x60000050
- .long 0x020006e0,0x20000050
+#define IPL_BS 0x730
+ .org 0
+ .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
+ .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
+ .long 0x02000068,0x60000050 # (a PSW and two CCWs).
+ .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
+ .long 0x020000f0,0x60000050 # The next 160 byte are loaded
+ .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
+ .long 0x02000190,0x60000050 # They form the continuation
+ .long 0x020001e0,0x60000050 # of the CCW program started
+ .long 0x02000230,0x60000050 # by ipl and load the range
+ .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
+ .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
+ .long 0x02000320,0x60000050 # in memory. At the end of
+ .long 0x02000370,0x60000050 # the channel program the PSW
+ .long 0x020003c0,0x60000050 # at location 0 is loaded.
+ .long 0x02000410,0x60000050 # Initial processing starts
+ .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
+ .long 0x020004b0,0x60000050
+ .long 0x02000500,0x60000050
+ .long 0x02000550,0x60000050
+ .long 0x020005a0,0x60000050
+ .long 0x020005f0,0x60000050
+ .long 0x02000640,0x60000050
+ .long 0x02000690,0x60000050
+ .long 0x020006e0,0x20000050
- .org 0xf0
+ .org 0xf0
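A quick cross-check on the CCW table above: after the discarded filler, the twenty read CCWs beginning at 0x020000f0 each transfer 0x50 (80) bytes, so together they load 20 * 80 = 1600 = 0x640 bytes, exactly covering 0x0f0-0x730. That is why IPL_BS is 0x730, why the .Lloader routine below reads in 1600-byte chunks, and why .Lcont advances the CCW data addresses by 0x640 per pass.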
#
# subroutine for loading cards from the reader
#
-.Lloader:
- la %r3,.Lorb # r2 = address of orb into r2
- la %r5,.Lirb # r4 = address of irb
- la %r6,.Lccws
- la %r7,20
+.Lloader:
+ la %r3,.Lorb # r2 = address of orb into r2
+ la %r5,.Lirb # r4 = address of irb
+ la %r6,.Lccws
+ la %r7,20
.Linit:
- st %r2,4(%r6) # initialize CCW data addresses
- la %r2,0x50(%r2)
- la %r6,8(%r6)
- bct 7,.Linit
+ st %r2,4(%r6) # initialize CCW data addresses
+ la %r2,0x50(%r2)
+ la %r6,8(%r6)
+ bct 7,.Linit
- lctl %c6,%c6,.Lcr6 # set IO subclass mask
- slr %r2,%r2
+ lctl %c6,%c6,.Lcr6 # set IO subclass mask
+ slr %r2,%r2
.Lldlp:
- ssch 0(%r3) # load chunk of 1600 bytes
- bnz .Llderr
+ ssch 0(%r3) # load chunk of 1600 bytes
+ bnz .Llderr
.Lwait4irq:
- mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
- lpsw .Lwaitpsw
+ mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
+ lpsw .Lwaitpsw
.Lioint:
- c %r1,0xb8 # compare subchannel number
- bne .Lwait4irq
- tsch 0(%r5)
+ c %r1,0xb8 # compare subchannel number
+ bne .Lwait4irq
+ tsch 0(%r5)
- slr %r0,%r0
- ic %r0,8(%r5) # get device status
- chi %r0,8 # channel end ?
- be .Lcont
- chi %r0,12 # channel end + device end ?
- be .Lcont
+ slr %r0,%r0
+ ic %r0,8(%r5) # get device status
+ chi %r0,8 # channel end ?
+ be .Lcont
+ chi %r0,12 # channel end + device end ?
+ be .Lcont
- l %r0,4(%r5)
- s %r0,8(%r3) # r0/8 = number of ccws executed
- mhi %r0,10 # *10 = number of bytes in ccws
- lh %r3,10(%r5) # get residual count
- sr %r0,%r3 # #ccws*80-residual=#bytes read
- ar %r2,%r0
-
- br %r14 # r2 contains the total size
+ l %r0,4(%r5)
+ s %r0,8(%r3) # r0/8 = number of ccws executed
+ mhi %r0,10 # *10 = number of bytes in ccws
+ lh %r3,10(%r5) # get residual count
+ sr %r0,%r3 # #ccws*80-residual=#bytes read
+ ar %r2,%r0
+
+ br %r14 # r2 contains the total size
.Lcont:
- ahi %r2,0x640 # add 0x640 to total size
- la %r6,.Lccws
- la %r7,20
+ ahi %r2,0x640 # add 0x640 to total size
+ la %r6,.Lccws
+ la %r7,20
.Lincr:
- l %r0,4(%r6) # update CCW data addresses
- ahi %r0,0x640
- st %r0,4(%r6)
- ahi %r6,8
- bct 7,.Lincr
+ l %r0,4(%r6) # update CCW data addresses
+ ahi %r0,0x640
+ st %r0,4(%r6)
+ ahi %r6,8
+ bct 7,.Lincr
- b .Lldlp
+ b .Lldlp
.Llderr:
- lpsw .Lcrash
+ lpsw .Lcrash
- .align 8
-.Lorb: .long 0x00000000,0x0080ff00,.Lccws
-.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6: .long 0xff000000
-.Lloadp:.long 0,0
- .align 8
-.Lcrash:.long 0x000a0000,0x00000000
+ .align 8
+.Lorb: .long 0x00000000,0x0080ff00,.Lccws
+.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+.Lcr6: .long 0xff000000
+.Lloadp:.long 0,0
+ .align 8
+.Lcrash:.long 0x000a0000,0x00000000
.Lnewpsw:
- .long 0x00080000,0x80000000+.Lioint
+ .long 0x00080000,0x80000000+.Lioint
.Lwaitpsw:
- .long 0x020a0000,0x80000000+.Lioint
+ .long 0x020a0000,0x80000000+.Lioint
- .align 8
-.Lccws: .rept 19
- .long 0x02600050,0x00000000
- .endr
- .long 0x02200050,0x00000000
-#endif /* CONFIG_IPL_VM */
+ .align 8
+.Lccws: .rept 19
+ .long 0x02600050,0x00000000
+ .endr
+ .long 0x02200050,0x00000000
+#endif /* CONFIG_IPL_VM */
iplstart:
- lh %r1,0xb8 # test if subchannel number
- bct %r1,.Lnoload # is valid
- l %r1,0xb8 # load ipl subchannel number
- la %r2,IPL_BS # load start address
- bas %r14,.Lloader # load rest of ipl image
- l %r12,.Lparm # pointer to parameter area
- st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
+ lh %r1,0xb8 # test if subchannel number
+ bct %r1,.Lnoload # is valid
+ l %r1,0xb8 # load ipl subchannel number
+ la %r2,IPL_BS # load start address
+ bas %r14,.Lloader # load rest of ipl image
+ l %r12,.Lparm # pointer to parameter area
+ st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
#
# load parameter file from ipl device
#
.Lagain1:
- l %r2,.Linitrd # ramdisk loc. is temp
- bas %r14,.Lloader # load parameter file
- ltr %r2,%r2 # got anything ?
- bz .Lnopf
- chi %r2,895
- bnh .Lnotrunc
- la %r2,895
+ l %r2,.Linitrd # ramdisk loc. is temp
+ bas %r14,.Lloader # load parameter file
+ ltr %r2,%r2 # got anything ?
+ bz .Lnopf
+ chi %r2,895
+ bnh .Lnotrunc
+ la %r2,895
.Lnotrunc:
- l %r4,.Linitrd
- clc 0(3,%r4),.L_hdr # if it is HDRx
- bz .Lagain1 # skip dataset header
- clc 0(3,%r4),.L_eof # if it is EOFx
- bz .Lagain1 # skip dataset trailer
- la %r5,0(%r4,%r2)
- lr %r3,%r2
+ l %r4,.Linitrd
+ clc 0(3,%r4),.L_hdr # if it is HDRx
+ bz .Lagain1 # skip dataset header
+ clc 0(3,%r4),.L_eof # if it is EOFx
+ bz .Lagain1 # skip dataset trailer
+ la %r5,0(%r4,%r2)
+ lr %r3,%r2
.Lidebc:
- tm 0(%r5),0x80 # high order bit set ?
- bo .Ldocv # yes -> convert from EBCDIC
- ahi %r5,-1
- bct %r3,.Lidebc
- b .Lnocv
+ tm 0(%r5),0x80 # high order bit set ?
+ bo .Ldocv # yes -> convert from EBCDIC
+ ahi %r5,-1
+ bct %r3,.Lidebc
+ b .Lnocv
.Ldocv:
- l %r3,.Lcvtab
- tr 0(256,%r4),0(%r3) # convert parameters to ascii
- tr 256(256,%r4),0(%r3)
- tr 512(256,%r4),0(%r3)
- tr 768(122,%r4),0(%r3)
-.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
- mvc 0(256,%r3),0(%r4)
- mvc 256(256,%r3),256(%r4)
- mvc 512(256,%r3),512(%r4)
- mvc 768(122,%r3),768(%r4)
- slr %r0,%r0
- b .Lcntlp
+ l %r3,.Lcvtab
+ tr 0(256,%r4),0(%r3) # convert parameters to ascii
+ tr 256(256,%r4),0(%r3)
+ tr 512(256,%r4),0(%r3)
+ tr 768(122,%r4),0(%r3)
+.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
+ mvc 0(256,%r3),0(%r4)
+ mvc 256(256,%r3),256(%r4)
+ mvc 512(256,%r3),512(%r4)
+ mvc 768(122,%r3),768(%r4)
+ slr %r0,%r0
+ b .Lcntlp
.Ldelspc:
- ic %r0,0(%r2,%r3)
- chi %r0,0x20 # is it a space ?
- be .Lcntlp
- ahi %r2,1
- b .Leolp
+ ic %r0,0(%r2,%r3)
+ chi %r0,0x20 # is it a space ?
+ be .Lcntlp
+ ahi %r2,1
+ b .Leolp
.Lcntlp:
- brct %r2,.Ldelspc
+ brct %r2,.Ldelspc
.Leolp:
- slr %r0,%r0
- stc %r0,0(%r2,%r3) # terminate buffer
+ slr %r0,%r0
+ stc %r0,0(%r2,%r3) # terminate buffer
.Lnopf:
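The .Lidebc loop above decides whether the parameter file needs EBCDIC-to-ASCII translation by scanning backwards for any byte with the high-order bit set. A minimal C sketch of that heuristic (illustrative only; the kernel does it in assembly with tm and tr):

	#include <stddef.h>

	/* Printable EBCDIC text contains bytes with the top bit set,
	 * 7-bit ASCII does not; scan from the end, as .Lidebc does. */
	static int looks_like_ebcdic(const unsigned char *buf, size_t len)
	{
		while (len--)
			if (buf[len] & 0x80)
				return 1;
		return 0;
	}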
#
# load ramdisk from ipl device
-#
+#
.Lagain2:
- l %r2,.Linitrd # addr of ramdisk
- st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
- bas %r14,.Lloader # load ramdisk
- st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk
- ltr %r2,%r2
- bnz .Lrdcont
- st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
+ l %r2,.Linitrd # addr of ramdisk
+ st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
+ bas %r14,.Lloader # load ramdisk
+ st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd
+ ltr %r2,%r2
+ bnz .Lrdcont
+ st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
.Lrdcont:
- l %r2,.Linitrd
+ l %r2,.Linitrd
- clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
- bz .Lagain2
- clc 0(3,%r2),.L_eof
- bz .Lagain2
+ clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
+ bz .Lagain2
+ clc 0(3,%r2),.L_eof
+ bz .Lagain2
#ifdef CONFIG_IPL_VM
#
# reset files in VM reader
#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running VM ?
- bno .Lnoreset
- la %r2,.Lreset
- lhi %r3,26
- diag %r2,%r3,8
- la %r5,.Lirb
- stsch 0(%r5) # check if irq is pending
- tm 30(%r5),0x0f # by verifying if any of the
- bnz .Lwaitforirq # activity or status control
- tm 31(%r5),0xff # bits is set in the schib
- bz .Lnoreset
+ stidp __LC_CPUID # store cpuid
+ tm __LC_CPUID,0xff # running VM ?
+ bno .Lnoreset
+ la %r2,.Lreset
+ lhi %r3,26
+ diag %r2,%r3,8
+ la %r5,.Lirb
+ stsch 0(%r5) # check if irq is pending
+ tm 30(%r5),0x0f # by verifying if any of the
+ bnz .Lwaitforirq # activity or status control
+ tm 31(%r5),0xff # bits is set in the schib
+ bz .Lnoreset
.Lwaitforirq:
- mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw
+ mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw
.Lwaitrdrirq:
- lpsw .Lrdrwaitpsw
+ lpsw .Lrdrwaitpsw
.Lrdrint:
- c %r1,0xb8 # compare subchannel number
- bne .Lwaitrdrirq
- la %r5,.Lirb
- tsch 0(%r5)
+ c %r1,0xb8 # compare subchannel number
+ bne .Lwaitrdrirq
+ la %r5,.Lirb
+ tsch 0(%r5)
.Lnoreset:
- b .Lnoload
+ b .Lnoload
- .align 8
+ .align 8
.Lrdrnewpsw:
- .long 0x00080000,0x80000000+.Lrdrint
+ .long 0x00080000,0x80000000+.Lrdrint
.Lrdrwaitpsw:
- .long 0x020a0000,0x80000000+.Lrdrint
+ .long 0x020a0000,0x80000000+.Lrdrint
#endif
#
# everything loaded, go for it
#
.Lnoload:
- l %r1,.Lstartup
- br %r1
+ l %r1,.Lstartup
+ br %r1
-.Linitrd:.long _end + 0x400000 # default address of initrd
+.Linitrd:.long _end + 0x400000 # default address of initrd
.Lparm: .long PARMAREA
.Lstartup: .long startup
-.Lcvtab:.long _ebcasc # ebcdic to ascii table
-.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
- .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
- .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
-.L_eof: .long 0xc5d6c600 /* C'EOF' */
-.L_hdr: .long 0xc8c4d900 /* C'HDR' */
+.Lcvtab:.long _ebcasc # ebcdic to ascii table
+.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
+ .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
+ .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
+.L_eof: .long 0xc5d6c600 /* C'EOF' */
+.L_hdr: .long 0xc8c4d900 /* C'HDR' */
-#endif /* CONFIG_IPL */
+#endif /* CONFIG_IPL */
#
# SALIPL loader support. Based on a patch by Rob van der Heij.
# This entry point is called directly from the SALIPL loader and
# doesn't need a builtin ipl record.
#
- .org 0x800
- .globl start
+ .org 0x800
+ .globl start
start:
- stm %r0,%r15,0x07b0 # store registers
- basr %r12,%r0
+ stm %r0,%r15,0x07b0 # store registers
+ basr %r12,%r0
.base:
- l %r11,.parm
- l %r8,.cmd # pointer to command buffer
+ l %r11,.parm
+ l %r8,.cmd # pointer to command buffer
- ltr %r9,%r9 # do we have SALIPL parameters?
- bp .sk8x8
+ ltr %r9,%r9 # do we have SALIPL parameters?
+ bp .sk8x8
- mvc 0(64,%r8),0x00b0 # copy saved registers
- xc 64(240-64,%r8),0(%r8) # remainder of buffer
- tr 0(64,%r8),.lowcase
- b .gotr
+ mvc 0(64,%r8),0x00b0 # copy saved registers
+ xc 64(240-64,%r8),0(%r8) # remainder of buffer
+ tr 0(64,%r8),.lowcase
+ b .gotr
.sk8x8:
- mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
+ mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
.gotr:
- l %r10,.tbl # EBCDIC to ASCII table
- tr 0(240,%r8),0(%r10)
- stidp __LC_CPUID # Are we running on VM maybe
- cli __LC_CPUID,0xff
- bnz .test
- .long 0x83300060 # diag 3,0,x'0060' - storage size
- b .done
+ l %r10,.tbl # EBCDIC to ASCII table
+ tr 0(240,%r8),0(%r10)
+ stidp __LC_CPUID # Are we running on VM maybe
+ cli __LC_CPUID,0xff
+ bnz .test
+ .long 0x83300060 # diag 3,0,x'0060' - storage size
+ b .done
.test:
- mvc 0x68(8),.pgmnw # set up pgm check handler
- l %r2,.fourmeg
- lr %r3,%r2
- bctr %r3,%r0 # 4M-1
-.loop: iske %r0,%r3
- ar %r3,%r2
+ mvc 0x68(8),.pgmnw # set up pgm check handler
+ l %r2,.fourmeg
+ lr %r3,%r2
+ bctr %r3,%r0 # 4M-1
+.loop: iske %r0,%r3
+ ar %r3,%r2
.pgmx:
- sr %r3,%r2
- la %r3,1(%r3)
+ sr %r3,%r2
+ la %r3,1(%r3)
.done:
- l %r1,.memsize
- st %r3,ARCH_OFFSET(%r1)
- slr %r0,%r0
- st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
- st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
- j startup # continue with startup
-.tbl: .long _ebcasc # translate table
-.cmd: .long COMMAND_LINE # address of command line buffer
-.parm: .long PARMAREA
+ l %r1,.memsize
+ st %r3,ARCH_OFFSET(%r1)
+ slr %r0,%r0
+ st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
+ st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
+ j startup # continue with startup
+.tbl: .long _ebcasc # translate table
+.cmd: .long COMMAND_LINE # address of command line buffer
+.parm: .long PARMAREA
.memsize: .long memory_size
.fourmeg: .long 0x00400000 # 4M
-.pgmnw: .long 0x00080000,.pgmx
+.pgmnw: .long 0x00080000,.pgmx
.lowcase:
- .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
+ .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
- .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
+ .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
- .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
+ .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
- .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
+ .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
- .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
+ .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
- .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
+ .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
- .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
+ .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
- .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
+ .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
- .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
+ .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
- .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
+ .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
- .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
+ .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
- .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
+ .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
- .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
+ .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
- .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
+ .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
- .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
+ .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
#ifdef CONFIG_64BIT
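The VM reader reset earlier in this file hands the EBCDIC string at .Lreset ("CHANGE RDR ALL KEEP NOHOLD", 26 bytes) to CP via diagnose 8. A rough C sketch of that calling convention, assuming the simple no-response form of the interface (rx = command address, ry = length):

	/* Sketch only; the command text must already be in EBCDIC. */
	static void cp_simple_command(char *ebcdic_cmd, int len)
	{
		register unsigned long reg2 asm("2") = (unsigned long) ebcdic_cmd;
		register unsigned long reg3 asm("3") = len;

		asm volatile(
			"	diag	%0,%1,0x8"
			: : "d" (reg2), "d" (reg3) : "memory");
	}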
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 1fa9fa1ca740..1b952a3664e2 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -254,6 +254,16 @@ startup_continue:
oi 3(%r12),0x80 # set IDTE flag
.Lchkidte:
+#
+# find out if the diag 0x9c is available
+#
+ mvc __LC_PGM_NEW_PSW(8),.Lpcdiag9c-.LPG1(%r13)
+ stap __LC_CPUID+4 # store cpu address
+ lh %r1,__LC_CPUID+4
+ diag %r1,0,0x9c # test diag 0x9c
+ oi 2(%r12),1 # set diag9c flag
+.Lchkdiag9c:
+
lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
# virtual and never return ...
.align 8
@@ -281,6 +291,7 @@ startup_continue:
.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
+.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
.Lmemsize:.long memory_size
.Lmchunk:.long memory_chunk
.Lmflags:.long machine_flags
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index a8bdd96494c7..b30e5897cdf7 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -15,232 +15,232 @@
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
- .org 0x10000
-startup:basr %r13,0 # get base
-.LPG0: l %r13,0f-.LPG0(%r13)
- b 0(%r13)
-0: .long startup_continue
+ .org 0x10000
+startup:basr %r13,0 # get base
+.LPG0: l %r13,0f-.LPG0(%r13)
+ b 0(%r13)
+0: .long startup_continue
#
# params at 10400 (setup.h)
#
- .org PARMAREA
- .quad 0 # IPL_DEVICE
- .quad 0 # INITRD_START
- .quad 0 # INITRD_SIZE
+ .org PARMAREA
+ .quad 0 # IPL_DEVICE
+ .quad 0 # INITRD_START
+ .quad 0 # INITRD_SIZE
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
+ .org COMMAND_LINE
+ .byte "root=/dev/ram0 ro"
+ .byte 0
- .org 0x11000
+ .org 0x11000
startup_continue:
- basr %r13,0 # get base
-.LPG1: sll %r13,1 # remove high order bit
- srl %r13,1
- lhi %r1,1 # mode 1 = esame
- mvi __LC_AR_MODE_ID,1 # set esame flag
- slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,0x12 # switch to esame mode
- sam64 # switch to 64 bit mode
- lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
- # move IPL device to lowcore
- mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+ basr %r13,0 # get base
+.LPG1: sll %r13,1 # remove high order bit
+ srl %r13,1
+ lhi %r1,1 # mode 1 = esame
+ mvi __LC_AR_MODE_ID,1 # set esame flag
+ slr %r0,%r0 # set cpuid to zero
+ sigp %r1,%r0,0x12 # switch to esame mode
+ sam64 # switch to 64 bit mode
+ lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+ lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
+ # move IPL device to lowcore
+ mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
#
# Setup stack
#
- larl %r15,init_thread_union
- lg %r14,__TI_task(%r15) # cache current in lowcore
- stg %r14,__LC_CURRENT
- aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
- stg %r15,__LC_KERNEL_STACK # set end of kernel stack
- aghi %r15,-160
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+ larl %r15,init_thread_union
+ lg %r14,__TI_task(%r15) # cache current in lowcore
+ stg %r14,__LC_CURRENT
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+ stg %r15,__LC_KERNEL_STACK # set end of kernel stack
+ aghi %r15,-160
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
- brasl %r14,ipl_save_parameters
+ brasl %r14,ipl_save_parameters
#
# clear bss memory
#
- larl %r2,__bss_start # start of bss segment
- larl %r3,_end # end of bss segment
- sgr %r3,%r2 # length of bss
- sgr %r4,%r4 #
- sgr %r5,%r5 # set src,length and pad to zero
- mvcle %r2,%r4,0 # clear mem
- jo .-4 # branch back, if not finished
+ larl %r2,__bss_start # start of bss segment
+ larl %r3,_end # end of bss segment
+ sgr %r3,%r2 # length of bss
+ sgr %r4,%r4 #
+ sgr %r5,%r5 # set src,length and pad to zero
+ mvcle %r2,%r4,0 # clear mem
+ jo .-4 # branch back, if not finished
- l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
+ l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
.Lservicecall:
- stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
+ stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
- stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
- la %r1,0x200 # set bit 22
- og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
- stg %r1,.Lcr-.LPG1(%r13)
- lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
+ stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
+ la %r1,0x200 # set bit 22
+ og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
+ stg %r1,.Lcr-.LPG1(%r13)
+ lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
- mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
- larl %r1,.Lsclph
- stg %r1,__LC_EXT_NEW_PSW+8 # set handler
+ mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
+ larl %r1,.Lsclph
+ stg %r1,__LC_EXT_NEW_PSW+8 # set handler
- larl %r4,.Lsccb # %r4 is our index for sccb stuff
- lgr %r1,%r4 # our sccb
- .insn rre,0xb2200000,%r2,%r1 # service call
- ipm %r1
- srl %r1,28 # get cc code
- xr %r3,%r3
- chi %r1,3
- be .Lfchunk-.LPG1(%r13) # leave
- chi %r1,2
- be .Lservicecall-.LPG1(%r13)
- lpswe .Lwaitsclp-.LPG1(%r13)
+ larl %r4,.Lsccb # %r4 is our index for sccb stuff
+ lgr %r1,%r4 # our sccb
+ .insn rre,0xb2200000,%r2,%r1 # service call
+ ipm %r1
+ srl %r1,28 # get cc code
+ xr %r3,%r3
+ chi %r1,3
+ be .Lfchunk-.LPG1(%r13) # leave
+ chi %r1,2
+ be .Lservicecall-.LPG1(%r13)
+ lpswe .Lwaitsclp-.LPG1(%r13)
.Lsclph:
- lh %r1,.Lsccbr-.Lsccb(%r4)
- chi %r1,0x10 # 0x0010 is the success code
- je .Lprocsccb # let's process the sccb
- chi %r1,0x1f0
- bne .Lfchunk-.LPG1(%r13) # unhandled error code
- c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
- bne .Lfchunk-.LPG1(%r13) # if no, give up
- l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
- b .Lservicecall-.LPG1(%r13)
+ lh %r1,.Lsccbr-.Lsccb(%r4)
+ chi %r1,0x10 # 0x0010 is the success code
+ je .Lprocsccb # let's process the sccb
+ chi %r1,0x1f0
+ bne .Lfchunk-.LPG1(%r13) # unhandled error code
+ c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
+ bne .Lfchunk-.LPG1(%r13) # if no, give up
+ l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
+ b .Lservicecall-.LPG1(%r13)
.Lprocsccb:
- lghi %r1,0
- icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
- jnz .Lscnd
- lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
+ lghi %r1,0
+ icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
+ jnz .Lscnd
+ lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
.Lscnd:
- xr %r3,%r3 # same logic
- ic %r3,.Lscpa1-.Lsccb(%r4)
- chi %r3,0x00
- jne .Lcompmem
- l %r3,.Lscpa2-.Lsccb(%r4)
+ xr %r3,%r3 # same logic
+ ic %r3,.Lscpa1-.Lsccb(%r4)
+ chi %r3,0x00
+ jne .Lcompmem
+ l %r3,.Lscpa2-.Lsccb(%r4)
.Lcompmem:
- mlgr %r2,%r1 # mem in MB on 128-bit
- l %r1,.Lonemb-.LPG1(%r13)
- mlgr %r2,%r1 # mem size in bytes in %r3
- b .Lfchunk-.LPG1(%r13)
+ mlgr %r2,%r1 # mem in MB on 128-bit
+ l %r1,.Lonemb-.LPG1(%r13)
+ mlgr %r2,%r1 # mem size in bytes in %r3
+ b .Lfchunk-.LPG1(%r13)
- .align 4
+ .align 4
.Lpmask:
- .byte 0
- .align 8
+ .byte 0
+ .align 8
.Lcr:
- .quad 0x00 # place holder for cr0
+ .quad 0x00 # place holder for cr0
.Lwaitsclp:
- .quad 0x0102000180000000,.Lsclph
+ .quad 0x0102000180000000,.Lsclph
.Lrcp:
- .int 0x00120001 # Read SCP forced code
+ .int 0x00120001 # Read SCP forced code
.Lrcp2:
- .int 0x00020001 # Read SCP code
+ .int 0x00020001 # Read SCP code
.Lonemb:
- .int 0x100000
+ .int 0x100000
.Lfchunk:
- # set program check new psw mask
- mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
+ # set program check new psw mask
+ mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
#
# find memory chunks.
#
- lgr %r9,%r3 # end of mem
- larl %r1,.Lchkmem # set program check address
- stg %r1,__LC_PGM_NEW_PSW+8
- la %r1,1 # test in increments of 128KB
- sllg %r1,%r1,17
- larl %r3,memory_chunk
- slgr %r4,%r4 # set start of chunk to zero
- slgr %r5,%r5 # set end of chunk to zero
- slr %r6,%r6 # set access code to zero
- la %r10,MEMORY_CHUNKS # number of chunks
+ lgr %r9,%r3 # end of mem
+ larl %r1,.Lchkmem # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
+ la %r1,1 # test in increments of 128KB
+ sllg %r1,%r1,17
+ larl %r3,memory_chunk
+ slgr %r4,%r4 # set start of chunk to zero
+ slgr %r5,%r5 # set end of chunk to zero
+ slr %r6,%r6 # set access code to zero
+ la %r10,MEMORY_CHUNKS # number of chunks
.Lloop:
- tprot 0(%r5),0 # test protection of first byte
- ipm %r7
- srl %r7,28
- clr %r6,%r7 # compare cc with last access code
- je .Lsame
- j .Lchkmem
+ tprot 0(%r5),0 # test protection of first byte
+ ipm %r7
+ srl %r7,28
+ clr %r6,%r7 # compare cc with last access code
+ je .Lsame
+ j .Lchkmem
.Lsame:
- algr %r5,%r1 # add 128KB to end of chunk
- # no need to check here,
- brc 12,.Lloop # this is the same chunk
-.Lchkmem: # > 16EB or tprot got a program check
- clgr %r4,%r5 # chunk size > 0?
- je .Lchkloop
- stg %r4,0(%r3) # store start address of chunk
- lgr %r0,%r5
- slgr %r0,%r4
- stg %r0,8(%r3) # store size of chunk
- st %r6,20(%r3) # store type of chunk
- la %r3,24(%r3)
- larl %r8,memory_size
- stg %r5,0(%r8) # store memory size
- ahi %r10,-1 # update chunk number
+ algr %r5,%r1 # add 128KB to end of chunk
+ # no need to check here,
+ brc 12,.Lloop # this is the same chunk
+.Lchkmem: # > 16EB or tprot got a program check
+ clgr %r4,%r5 # chunk size > 0?
+ je .Lchkloop
+ stg %r4,0(%r3) # store start address of chunk
+ lgr %r0,%r5
+ slgr %r0,%r4
+ stg %r0,8(%r3) # store size of chunk
+ st %r6,20(%r3) # store type of chunk
+ la %r3,24(%r3)
+ larl %r8,memory_size
+ stg %r5,0(%r8) # store memory size
+ ahi %r10,-1 # update chunk number
.Lchkloop:
- lr %r6,%r7 # set access code to last cc
+ lr %r6,%r7 # set access code to last cc
# we got an exception or we're starting a new
# chunk, we must check if we should
# still try to find valid memory (if we detected
# the amount of available storage), and if we
# have chunks left
- lghi %r4,1
- sllg %r4,%r4,31
- clgr %r5,%r4
- je .Lhsaskip
- xr %r0, %r0
- clgr %r0, %r9 # did we detect memory?
- je .Ldonemem # if not, leave
- chi %r10, 0 # do we have chunks left?
- je .Ldonemem
+ lghi %r4,1
+ sllg %r4,%r4,31
+ clgr %r5,%r4
+ je .Lhsaskip
+ xr %r0, %r0
+ clgr %r0, %r9 # did we detect memory?
+ je .Ldonemem # if not, leave
+ chi %r10, 0 # do we have chunks left?
+ je .Ldonemem
.Lhsaskip:
- algr %r5,%r1 # add 128KB to end of chunk
- lgr %r4,%r5 # potential new chunk
- clgr %r5,%r9 # should we go on?
- jl .Lloop
-.Ldonemem:
+ algr %r5,%r1 # add 128KB to end of chunk
+ lgr %r4,%r5 # potential new chunk
+ clgr %r5,%r9 # should we go on?
+ jl .Lloop
+.Ldonemem:
- larl %r12,machine_flags
+ larl %r12,machine_flags
#
# find out if we are running under VM
#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running under VM ?
- bno 0f-.LPG1(%r13)
- oi 7(%r12),1 # set VM flag
-0: lh %r0,__LC_CPUID+4 # get cpu version
- chi %r0,0x7490 # running on a P/390 ?
- bne 1f-.LPG1(%r13)
- oi 7(%r12),4 # set P/390 flag
+ stidp __LC_CPUID # store cpuid
+ tm __LC_CPUID,0xff # running under VM ?
+ bno 0f-.LPG1(%r13)
+ oi 7(%r12),1 # set VM flag
+0: lh %r0,__LC_CPUID+4 # get cpu version
+ chi %r0,0x7490 # running on a P/390 ?
+ bne 1f-.LPG1(%r13)
+ oi 7(%r12),4 # set P/390 flag
1:
#
# find out if we have the MVPG instruction
#
- la %r1,0f-.LPG1(%r13) # set program check address
- stg %r1,__LC_PGM_NEW_PSW+8
- sgr %r0,%r0
- lghi %r1,0
- lghi %r2,0
- mvpg %r1,%r2 # test MVPG instruction
- oi 7(%r12),16 # set MVPG flag
+ la %r1,0f-.LPG1(%r13) # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
+ sgr %r0,%r0
+ lghi %r1,0
+ lghi %r2,0
+ mvpg %r1,%r2 # test MVPG instruction
+ oi 7(%r12),16 # set MVPG flag
0:
#
# find out if the diag 0x44 works in 64 bit mode
#
- la %r1,0f-.LPG1(%r13) # set program check address
- stg %r1,__LC_PGM_NEW_PSW+8
- diag 0,0,0x44 # test diag 0x44
- oi 7(%r12),32 # set diag44 flag
-0:
+ la %r1,0f-.LPG1(%r13) # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
+ diag 0,0,0x44 # test diag 0x44
+ oi 7(%r12),32 # set diag44 flag
+0:
#
# find out if we have the IDTE instruction
#
- la %r1,0f-.LPG1(%r13) # set program check address
- stg %r1,__LC_PGM_NEW_PSW+8
+ la %r1,0f-.LPG1(%r13) # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
.long 0xb2b10000 # store facility list
tm 0xc8,0x08 # check bit for clearing-by-ASCE
bno 0f-.LPG1(%r13)
@@ -251,6 +251,17 @@ startup_continue:
0:
#
+# find out if the diag 0x9c is available
+#
+ la %r1,0f-.LPG1(%r13) # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
+ stap __LC_CPUID+4 # store cpu address
+ lh %r1,__LC_CPUID+4
+ diag %r1,0,0x9c # test diag 0x9c
+ oi 6(%r12),1 # set diag9c flag
+0:
+
+#
# find out if we have the MVCOS instruction
#
la %r1,0f-.LPG1(%r13) # set program check address
@@ -263,45 +274,45 @@ startup_continue:
oi 6(%r12),2 # set MVCOS flag
1:
- lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
- .align 16
-.Lentry:.quad 0x0000000180000000,_stext
-.Lctl: .quad 0x04b50002 # cr0: various things
- .quad 0 # cr1: primary space segment table
- .quad .Lduct # cr2: dispatchable unit control table
- .quad 0 # cr3: instruction authorization
- .quad 0 # cr4: instruction authorization
- .quad 0xffffffffffffffff # cr5: primary-aste origin
- .quad 0 # cr6: I/O interrupts
- .quad 0 # cr7: secondary space segment table
- .quad 0 # cr8: access registers translation
- .quad 0 # cr9: tracing off
- .quad 0 # cr10: tracing off
- .quad 0 # cr11: tracing off
- .quad 0 # cr12: tracing off
- .quad 0 # cr13: home space segment table
- .quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
-.Lpcmsk:.quad 0x0000000180000000
+ lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
+ # virtual and never return ...
+ .align 16
+.Lentry:.quad 0x0000000180000000,_stext
+.Lctl: .quad 0x04b50002 # cr0: various things
+ .quad 0 # cr1: primary space segment table
+ .quad .Lduct # cr2: dispatchable unit control table
+ .quad 0 # cr3: instruction authorization
+ .quad 0 # cr4: instruction authorization
+ .quad 0xffffffffffffffff # cr5: primary-aste origin
+ .quad 0 # cr6: I/O interrupts
+ .quad 0 # cr7: secondary space segment table
+ .quad 0 # cr8: access registers translation
+ .quad 0 # cr9: tracing off
+ .quad 0 # cr10: tracing off
+ .quad 0 # cr11: tracing off
+ .quad 0 # cr12: tracing off
+ .quad 0 # cr13: home space segment table
+ .quad 0xc0000000 # cr14: machine check handling off
+ .quad 0 # cr15: linkage stack operations
+.Lduct: .long 0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0
+.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
-.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
-.Lnop: .long 0x07000700
+.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
+.Lnop: .long 0x07000700
.Lparmaddr:
.quad PARMAREA
- .globl ipl_schib
+ .globl ipl_schib
ipl_schib:
.rept 13
.long 0
.endr
- .globl ipl_flags
+ .globl ipl_flags
ipl_flags:
- .long 0
- .globl ipl_devno
+ .long 0
+ .globl ipl_devno
ipl_devno:
.word 0
@@ -309,47 +320,47 @@ ipl_devno:
.globl s390_readinfo_sccb
s390_readinfo_sccb:
.Lsccb:
- .hword 0x1000 # length, one page
- .byte 0x00,0x00,0x00
- .byte 0x80 # variable response bit set
+ .hword 0x1000 # length, one page
+ .byte 0x00,0x00,0x00
+ .byte 0x80 # variable response bit set
.Lsccbr:
- .hword 0x00 # response code
+ .hword 0x00 # response code
.Lscpincr1:
- .hword 0x00
+ .hword 0x00
.Lscpa1:
- .byte 0x00
- .fill 89,1,0
+ .byte 0x00
+ .fill 89,1,0
.Lscpa2:
- .int 0x00
+ .int 0x00
.Lscpincr2:
- .quad 0x00
- .fill 3984,1,0
+ .quad 0x00
+ .fill 3984,1,0
.org 0x13000
#ifdef CONFIG_SHARED_KERNEL
- .org 0x100000
+ .org 0x100000
#endif
-
+
#
# startup-code, running in absolute addressing mode
#
- .globl _stext
-_stext: basr %r13,0 # get base
+ .globl _stext
+_stext: basr %r13,0 # get base
.LPG3:
# check control registers
- stctg %c0,%c15,0(%r15)
- oi 6(%r15),0x40 # enable sigp emergency signal
- oi 4(%r15),0x10 # switch on low address protection
- lctlg %c0,%c15,0(%r15)
+ stctg %c0,%c15,0(%r15)
+ oi 6(%r15),0x40 # enable sigp emergency signal
+ oi 4(%r15),0x10 # switch on low address protection
+ lctlg %c0,%c15,0(%r15)
- lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
- brasl %r14,start_kernel # go to C code
+ lam 0,15,.Laregs-.LPG3(%r13) # load acrs needed by uaccess
+ brasl %r14,start_kernel # go to C code
#
# We returned from start_kernel ?!? PANIK
#
- basr %r13,0
- lpswe .Ldw-.(%r13) # load disabled wait psw
+ basr %r13,0
+ lpswe .Ldw-.(%r13) # load disabled wait psw
- .align 8
-.Ldw: .quad 0x0002000180000000,0x0000000000000000
-.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+ .align 8
+.Ldw: .quad 0x0002000180000000,0x0000000000000000
+.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
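The memory-chunk scan in the head64.S hunks above probes storage in 128KB steps and starts a new chunk whenever the access code returned by tprot changes; running off the end of storage raises the program check that terminates the scan. The probe can be expressed as a small C helper -- a sketch using the usual ipm/srl condition-code extraction, not the kernel's actual code:

	/* Condition code of "test protection" for one address; the scan
	 * groups 128KB blocks into chunks while this value stays the same. */
	static inline int tprot(unsigned long addr)
	{
		int cc;

		asm volatile(
			"	tprot	0(%1),0\n"
			"	ipm	%0\n"
			"	srl	%0,28"
			: "=d" (cc) : "a" (addr) : "cc");
		return cc;
	}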
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6555cc48e28f..1f5e782b3d05 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -120,24 +120,15 @@ static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
static int diag308(unsigned long subcode, void *addr)
{
- register unsigned long _addr asm("0") = (unsigned long)addr;
+ register unsigned long _addr asm("0") = (unsigned long) addr;
register unsigned long _rc asm("1") = 0;
- asm volatile (
- " diag %0,%2,0x308\n"
- "0: \n"
- ".section __ex_table,\"a\"\n"
-#ifdef CONFIG_64BIT
- " .align 8\n"
- " .quad 0b, 0b\n"
-#else
- " .align 4\n"
- " .long 0b, 0b\n"
-#endif
- ".previous\n"
+ asm volatile(
+ " diag %0,%2,0x308\n"
+ "0:\n"
+ EX_TABLE(0b,0b)
: "+d" (_addr), "+d" (_rc)
- : "d" (subcode) : "cc", "memory" );
-
+ : "d" (subcode) : "cc", "memory");
return _rc;
}
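The rewrite above folds the open-coded __ex_table fragment into the EX_TABLE helper. Judging from the lines being removed, the macro presumably expands to the same section trickery; a hypothetical expansion (an assumption, not the header's verbatim definition):

	#ifdef CONFIG_64BIT
	#define EX_TABLE(_fault, _target)		\
		".section __ex_table,\"a\"\n"		\
		"	.align 8\n"			\
		"	.quad " #_fault "," #_target "\n" \
		".previous\n"
	#else
	#define EX_TABLE(_fault, _target)		\
		".section __ex_table,\"a\"\n"		\
		"	.align 4\n"			\
		"	.long " #_fault "," #_target "\n" \
		".previous\n"
	#endif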
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d3cbfa3005ec..6603fbb41d07 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -45,7 +45,7 @@
#include <asm/irq.h>
#include <asm/timer.h>
-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
/*
* Return saved PC of a blocked thread. used in kernel/sched.
@@ -177,7 +177,8 @@ void show_regs(struct pt_regs *regs)
extern void kernel_thread_starter(void);
-__asm__(".align 4\n"
+asm(
+ ".align 4\n"
"kernel_thread_starter:\n"
" la 2,0(10)\n"
" basr 14,9\n"
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 4562cdbce8eb..0340477f3b08 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -32,58 +32,58 @@ do_reipl_asm: basr %r13,0
st %r13, __LC_PSW_SAVE_AREA+4
lctl %c6,%c6,.Lall-.Lpg0(%r13)
- lr %r1,%r2
- mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
- stsch .Lschib-.Lpg0(%r13)
- oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
- msch .Lschib-.Lpg0(%r13)
- lhi %r0,5
-.Lssch: ssch .Liplorb-.Lpg0(%r13)
+ lr %r1,%r2
+ mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+ stsch .Lschib-.Lpg0(%r13)
+ oi .Lschib+5-.Lpg0(%r13),0x84
+.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
+ msch .Lschib-.Lpg0(%r13)
+ lhi %r0,5
+.Lssch: ssch .Liplorb-.Lpg0(%r13)
jz .L001
- brct %r0,.Lssch
+ brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
-.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
+.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
+.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
.Lcont: c %r1,__LC_SUBCHANNEL_ID
jnz .Ltpi
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
jnz .Ltpi
- tsch .Liplirb-.Lpg0(%r13)
+ tsch .Liplirb-.Lpg0(%r13)
tm .Liplirb+9-.Lpg0(%r13),0xbf
- jz .L002
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
- jz .L003
- bas %r14,.Ldisab-.Lpg0(%r13)
+ jz .L002
+ bas %r14,.Ldisab-.Lpg0(%r13)
+.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
+ jz .L003
+ bas %r14,.Ldisab-.Lpg0(%r13)
.L003: spx .Lnull-.Lpg0(%r13)
- st %r1,__LC_SUBCHANNEL_ID
- lpsw 0
- sigp 0,0,0(6)
-.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
+ st %r1,__LC_SUBCHANNEL_ID
+ lpsw 0
+ sigp 0,0,0(6)
+.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
lpsw .Ldispsw-.Lpg0(%r13)
- .align 8
+ .align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .long 0xff000000
-.Lnull: .long 0x00000000
+.Lnull: .long 0x00000000
.Lctlsave1: .long 0x00000000
.Lctlsave2: .long 0x00000000
- .align 8
-.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
-.Lpcnew: .long 0x00080000,0x80000000+.Lecs
-.Lionew: .long 0x00080000,0x80000000+.Lcont
+ .align 8
+.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
+.Lpcnew: .long 0x00080000,0x80000000+.Lecs
+.Lionew: .long 0x00080000,0x80000000+.Lcont
.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
-.Ldispsw: .long 0x000a0000,0x00000000
-.Liplccws: .long 0x02000000,0x60000018
- .long 0x08000008,0x20000001
+.Ldispsw: .long 0x000a0000,0x00000000
+.Liplccws: .long 0x02000000,0x60000018
+ .long 0x08000008,0x20000001
.Liplorb: .long 0x0049504c,0x0040ff80
.long 0x00000000+.Liplccws
-.Lschib: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
+.Lschib: .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
.Liplirb: .long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
@@ -92,6 +92,3 @@ do_reipl_asm: basr %r13,0
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
-
-
-
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 95bd1e234f63..de7435054f7c 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,7 +4,7 @@
* S390 version
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
- Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
#include <asm/lowcore.h>
@@ -32,46 +32,46 @@ do_reipl_asm: basr %r13,0
stctg %c0,%c0,.Lregsave-.Lpg0(%r13)
ni .Lregsave+4-.Lpg0(%r13),0xef
lctlg %c0,%c0,.Lregsave-.Lpg0(%r13)
- lgr %r1,%r2
- mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
- stsch .Lschib-.Lpg0(%r13)
- oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
- msch .Lschib-.Lpg0(%r13)
- lghi %r0,5
-.Lssch: ssch .Liplorb-.Lpg0(%r13)
+ lgr %r1,%r2
+ mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
+ stsch .Lschib-.Lpg0(%r13)
+ oi .Lschib+5-.Lpg0(%r13),0x84
+.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
+ msch .Lschib-.Lpg0(%r13)
+ lghi %r0,5
+.Lssch: ssch .Liplorb-.Lpg0(%r13)
jz .L001
- brct %r0,.Lssch
+ brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
-.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
+.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
+.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
.Lcont: c %r1,__LC_SUBCHANNEL_ID
jnz .Ltpi
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
jnz .Ltpi
- tsch .Liplirb-.Lpg0(%r13)
+ tsch .Liplirb-.Lpg0(%r13)
tm .Liplirb+9-.Lpg0(%r13),0xbf
- jz .L002
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
- jz .L003
- bas %r14,.Ldisab-.Lpg0(%r13)
+ jz .L002
+ bas %r14,.Ldisab-.Lpg0(%r13)
+.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
+ jz .L003
+ bas %r14,.Ldisab-.Lpg0(%r13)
.L003: spx .Lnull-.Lpg0(%r13)
- st %r1,__LC_SUBCHANNEL_ID
- lhi %r1,0 # mode 0 = esa
- slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,0x12 # switch to esa mode
- lpsw 0
-.Ldisab: sll %r14,1
- srl %r14,1 # need to kill hi bit to avoid specification exceptions.
- st %r14,.Ldispsw+12-.Lpg0(%r13)
+ st %r1,__LC_SUBCHANNEL_ID
+ lhi %r1,0 # mode 0 = esa
+ slr %r0,%r0 # set cpuid to zero
+ sigp %r1,%r0,0x12 # switch to esa mode
+ lpsw 0
+.Ldisab: sll %r14,1
+ srl %r14,1 # need to kill hi bit to avoid specification exceptions.
+ st %r14,.Ldispsw+12-.Lpg0(%r13)
lpswe .Ldispsw-.Lpg0(%r13)
- .align 8
+ .align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
.Lregsave: .quad 0x0000000000000000
-.Lnull: .long 0x0000000000000000
- .align 16
+.Lnull: .long 0x0000000000000000
+ .align 16
/*
* These addresses have to be 31 bit otherwise
 * the sigp will throw a specification exception
@@ -81,26 +81,26 @@ do_reipl_asm: basr %r13,0
* 31bit lpswe instruction a fact they appear to have
 * omitted from the pop.
*/
-.Lnewpsw: .quad 0x0000000080000000
- .quad .Lpg1
-.Lpcnew: .quad 0x0000000080000000
- .quad .Lecs
-.Lionew: .quad 0x0000000080000000
- .quad .Lcont
+.Lnewpsw: .quad 0x0000000080000000
+ .quad .Lpg1
+.Lpcnew: .quad 0x0000000080000000
+ .quad .Lecs
+.Lionew: .quad 0x0000000080000000
+ .quad .Lcont
.Lwaitpsw: .quad 0x0202000080000000
- .quad .Ltpi
-.Ldispsw: .quad 0x0002000080000000
- .quad 0x0000000000000000
-.Liplccws: .long 0x02000000,0x60000018
- .long 0x08000008,0x20000001
+ .quad .Ltpi
+.Ldispsw: .quad 0x0002000080000000
+ .quad 0x0000000000000000
+.Liplccws: .long 0x02000000,0x60000018
+ .long 0x08000008,0x20000001
.Liplorb: .long 0x0049504c,0x0040ff80
.long 0x00000000+.Liplccws
-.Lschib: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
+.Lschib: .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
+ .long 0x00000000,0x00000000
.Liplirb: .long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
@@ -109,4 +109,3 @@ do_reipl_asm: basr %r13,0
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
-
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 2a25ec7147ff..f9899ff2e5b0 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -3,7 +3,7 @@
*
* (C) Copyright IBM Corp. 2005
*
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
+ * Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
*
*/
@@ -24,14 +24,14 @@
.text
.globl relocate_kernel
relocate_kernel:
- basr %r13,0 #base address
+ basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xf8 #disable DAT and IRQ (external)
- spx zero64-.base(%r13) #absolute addressing mode
+ stnsm sys_msk-.base(%r13),0xf8 # disable DAT and IRQ (external)
+ spx zero64-.base(%r13) # absolute addressing mode
stctl %c0,%c15,ctlregs-.base(%r13)
stm %r0,%r15,gprregs-.base(%r13)
la %r1,load_psw-.base(%r13)
- mvc 0(8,%r0),0(%r1)
+ mvc 0(8,%r0),0(%r1)
la %r0,.back-.base(%r13)
st %r0,4(%r0)
oi 4(%r0),0x80
@@ -51,50 +51,50 @@
.back_pgm:
lm %r0,%r15,gprregs-.base(%r13)
.start_reloc:
- lhi %r10,-1 #preparing the mask
- sll %r10,12 #shift it such that it becomes 0xf000
+ lhi %r10,-1 # preparing the mask
+ sll %r10,12 # shift it such that it becomes 0xf000
.top:
- lhi %r7,4096 #load PAGE_SIZE in r7
- lhi %r9,4096 #load PAGE_SIZE in r9
- l %r5,0(%r2) #read another word for indirection page
- ahi %r2,4 #increment pointer
- tml %r5,0x1 #is it a destination page?
- je .indir_check #NO, goto "indir_check"
- lr %r6,%r5 #r6 = r5
- nr %r6,%r10 #mask it out and...
- j .top #...next iteration
+ lhi %r7,4096 # load PAGE_SIZE in r7
+ lhi %r9,4096 # load PAGE_SIZE in r9
+ l %r5,0(%r2) # read another word for indirection page
+ ahi %r2,4 # increment pointer
+ tml %r5,0x1 # is it a destination page?
+ je .indir_check # NO, goto "indir_check"
+ lr %r6,%r5 # r6 = r5
+ nr %r6,%r10 # mask it out and...
+ j .top # ...next iteration
.indir_check:
- tml %r5,0x2 #is it an indirection page?
- je .done_test #NO, goto "done_test"
- nr %r5,%r10 #YES, mask out,
- lr %r2,%r5 #move it into the right register,
- j .top #and read next...
+ tml %r5,0x2 # is it an indirection page?
+ je .done_test # NO, goto "done_test"
+ nr %r5,%r10 # YES, mask out,
+ lr %r2,%r5 # move it into the right register,
+ j .top # and read next...
.done_test:
- tml %r5,0x4 #is it the done indicator?
- je .source_test #NO! Well, then it should be the source indicator...
- j .done #ok, lets finish it here...
+ tml %r5,0x4 # is it the done indicator?
+ je .source_test # NO! Well, then it should be the source indicator...
+ j .done # ok, lets finish it here...
.source_test:
- tml %r5,0x8 #it should be a source indicator...
- je .top #NO, ignore it...
- lr %r8,%r5 #r8 = r5
- nr %r8,%r10 #masking
- 0: mvcle %r6,%r8,0x0 #copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+ tml %r5,0x8 # it should be a source indicator...
+ je .top # NO, ignore it...
+ lr %r8,%r5 # r8 = r5
+ nr %r8,%r10 # masking
+ 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b
j .top
.done:
- sr %r0,%r0 #clear register r0
- la %r4,load_psw-.base(%r13) #load psw-address into the register
- o %r3,4(%r4) #or load address into psw
+ sr %r0,%r0 # clear register r0
+ la %r4,load_psw-.base(%r13) # load psw-address into the register
+ o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
- mvc 0(8,%r0),0(%r4) #copy psw to absolute address 0
+ mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
tm have_diag308-.base(%r13),0x01
jno .no_diag308
diag %r0,%r0,0x308
.no_diag308:
- sr %r1,%r1 #clear %r1
- sr %r2,%r2 #clear %r2
- sigp %r1,%r2,0x12 #set cpuid to zero
- lpsw 0 #hopefully start new kernel...
+ sr %r1,%r1 # clear %r1
+ sr %r2,%r2 # clear %r2
+ sigp %r1,%r2,0x12 # set cpuid to zero
+ lpsw 0 # hopefully start new kernel...
.align 8
zero64:
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 8cdb86e8911f..4fb443042d9c 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -3,7 +3,7 @@
*
* (C) Copyright IBM Corp. 2005
*
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
+ * Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
*
*/
@@ -25,10 +25,10 @@
.text
.globl relocate_kernel
relocate_kernel:
- basr %r13,0 #base address
+ basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xf8 #disable DAT and IRQs
- spx zero64-.base(%r13) #absolute addressing mode
+ stnsm sys_msk-.base(%r13),0xf8 # disable DAT and IRQs
+ spx zero64-.base(%r13) # absolute addressing mode
stctg %c0,%c15,ctlregs-.base(%r13)
stmg %r0,%r15,gprregs-.base(%r13)
lghi %r0,3
@@ -37,16 +37,16 @@
la %r0,.back_pgm-.base(%r13)
stg %r0,0x1d8(%r0)
la %r1,load_psw-.base(%r13)
- mvc 0(8,%r0),0(%r1)
+ mvc 0(8,%r0),0(%r1)
la %r0,.back-.base(%r13)
st %r0,4(%r0)
oi 4(%r0),0x80
lghi %r0,0
diag %r0,%r0,0x308
.back:
- lhi %r1,1 #mode 1 = esame
- sigp %r1,%r0,0x12 #switch to esame mode
- sam64 #switch to 64 bit addressing mode
+ lhi %r1,1 # mode 1 = esame
+ sigp %r1,%r0,0x12 # switch to esame mode
+ sam64 # switch to 64 bit addressing mode
basr %r13,0
.back_base:
oi have_diag308-.back_base(%r13),0x01
@@ -56,50 +56,50 @@
.back_pgm:
lmg %r0,%r15,gprregs-.base(%r13)
.top:
- lghi %r7,4096 #load PAGE_SIZE in r7
- lghi %r9,4096 #load PAGE_SIZE in r9
- lg %r5,0(%r2) #read another word for indirection page
- aghi %r2,8 #increment pointer
- tml %r5,0x1 #is it a destination page?
- je .indir_check #NO, goto "indir_check"
- lgr %r6,%r5 #r6 = r5
- nill %r6,0xf000 #mask it out and...
- j .top #...next iteration
+ lghi %r7,4096 # load PAGE_SIZE in r7
+ lghi %r9,4096 # load PAGE_SIZE in r9
+ lg %r5,0(%r2) # read another word for indirection page
+ aghi %r2,8 # increment pointer
+ tml %r5,0x1 # is it a destination page?
+ je .indir_check # NO, goto "indir_check"
+ lgr %r6,%r5 # r6 = r5
+ nill %r6,0xf000 # mask it out and...
+ j .top # ...next iteration
.indir_check:
- tml %r5,0x2 #is it an indirection page?
- je .done_test #NO, goto "done_test"
- nill %r5,0xf000 #YES, mask out,
- lgr %r2,%r5 #move it into the right register,
- j .top #and read next...
+ tml %r5,0x2 # is it an indirection page?
+ je .done_test # NO, goto "done_test"
+ nill %r5,0xf000 # YES, mask out,
+ lgr %r2,%r5 # move it into the right register,
+ j .top # and read next...
.done_test:
- tml %r5,0x4 #is it the done indicator?
- je .source_test #NO! Well, then it should be the source indicator...
- j .done #ok, lets finish it here...
+ tml %r5,0x4 # is it the done indicator?
+ je .source_test # NO! Well, then it should be the source indicator...
+ j .done # ok, lets finish it here...
.source_test:
- tml %r5,0x8 #it should be a source indicator...
- je .top #NO, ignore it...
- lgr %r8,%r5 #r8 = r5
- nill %r8,0xf000 #masking
- 0: mvcle %r6,%r8,0x0 #copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+ tml %r5,0x8 # it should be a source indicator...
+ je .top # NO, ignore it...
+ lgr %r8,%r5 # r8 = r5
+ nill %r8,0xf000 # masking
+ 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b
- j .top
+ j .top
.done:
- sgr %r0,%r0 #clear register r0
- la %r4,load_psw-.base(%r13) #load psw-address into the register
- o %r3,4(%r4) #or load address into psw
+ sgr %r0,%r0 # clear register r0
+ la %r4,load_psw-.base(%r13) # load psw-address into the register
+ o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
- mvc 0(8,%r0),0(%r4) #copy psw to absolute address 0
+ mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
tm have_diag308-.base(%r13),0x01
jno .no_diag308
diag %r0,%r0,0x308
.no_diag308:
- sam31 #31 bit mode
- sr %r1,%r1 #erase register r1
- sr %r2,%r2 #erase register r2
- sigp %r1,%r2,0x12 #set cpuid to zero
- lpsw 0 #hopefully start new kernel...
+ sam31 # 31 bit mode
+ sr %r1,%r1 # erase register r1
+ sr %r2,%r2 # erase register r2
+ sigp %r1,%r2,0x12 # set cpuid to zero
+ lpsw 0 # hopefully start new kernel...
- .align 8
+ .align 8
zero64:
.quad 0
load_psw:
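Both relocate_kernel variants above walk the kexec indirection list the same way, dispatching on the low tag bits of each entry (0x1 destination page, 0x2 indirection page, 0x4 done, 0x8 source page). Read back into C, the .top/.indir_check/.done_test/.source_test loop looks roughly like this sketch (PAGE_SIZE, PAGE_MASK and copy_page assumed from the usual kernel headers):

	static void relocate(unsigned long *entry)
	{
		unsigned char *dst = NULL;
		unsigned long e;

		for (;;) {
			e = *entry++;
			if (e & 0x1)			/* destination page */
				dst = (unsigned char *) (e & PAGE_MASK);
			else if (e & 0x2)		/* indirection: walk it */
				entry = (unsigned long *) (e & PAGE_MASK);
			else if (e & 0x4)		/* done indicator */
				break;
			else if (e & 0x8) {		/* source page: copy it */
				copy_page(dst, (void *) (e & PAGE_MASK));
				dst += PAGE_SIZE;
			}
		}
	}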
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
index 8dfb690c159f..191303f6c1d8 100644
--- a/arch/s390/kernel/semaphore.c
+++ b/arch/s390/kernel/semaphore.c
@@ -26,17 +26,17 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_val, new_val;
- __asm__ __volatile__(" l %0,0(%3)\n"
- "0: ltr %1,%0\n"
- " jhe 1f\n"
- " lhi %1,0\n"
- "1: ar %1,%4\n"
- " cs %0,%1,0(%3)\n"
- " jl 0b\n"
- : "=&d" (old_val), "=&d" (new_val),
- "=m" (sem->count)
- : "a" (&sem->count), "d" (incr), "m" (sem->count)
- : "cc" );
+ asm volatile(
+ " l %0,0(%3)\n"
+ "0: ltr %1,%0\n"
+ " jhe 1f\n"
+ " lhi %1,0\n"
+ "1: ar %1,%4\n"
+ " cs %0,%1,0(%3)\n"
+ " jl 0b\n"
+ : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
+ : "a" (&sem->count), "d" (incr), "m" (sem->count)
+ : "cc");
return old_val;
}
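The compare-and-swap loop in the rewritten asm computes new = max(old, 0) + incr and retries until cs succeeds. An equivalent C formulation, as a sketch that treats count as a plain int and lets cmpxchg stand in for the cs instruction:

	static inline int sem_update_count(struct semaphore *sem, int incr)
	{
		int old, new;

		do {
			old = sem->count;		   /* l, and cs reloads on failure */
			new = (old >= 0 ? old : 0) + incr; /* ltr/jhe/lhi, then ar */
		} while (cmpxchg(&sem->count, old, new) != old);
		return old;
	}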
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e3d9325f6022..a21cfbb9d97e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -101,7 +101,7 @@ void __devinit cpu_init (void)
/*
* Store processor id in lowcore (used e.g. in timer_interrupt)
*/
- asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
+ asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
S390_lowcore.cpu_data.cpu_addr = addr;
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b2e6f4c8d382..a8e6199755d4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -63,7 +63,7 @@ static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
- * Structure and data for smp_call_function(). This is designed to minimise
+ * Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
*/
static DEFINE_SPINLOCK(call_lock);
@@ -418,59 +418,49 @@ void smp_send_reschedule(int cpu)
/*
* parameter area for the set/clear control bit callbacks
*/
-typedef struct
-{
- __u16 start_ctl;
- __u16 end_ctl;
+struct ec_creg_mask_parms {
unsigned long orvals[16];
unsigned long andvals[16];
-} ec_creg_mask_parms;
+};
/*
* callback for setting/clearing control bits
*/
void smp_ctl_bit_callback(void *info) {
- ec_creg_mask_parms *pp;
+ struct ec_creg_mask_parms *pp = info;
unsigned long cregs[16];
int i;
- pp = (ec_creg_mask_parms *) info;
- __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
- for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+ __ctl_store(cregs, 0, 15);
+ for (i = 0; i <= 15; i++)
cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
- __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+ __ctl_load(cregs, 0, 15);
}
/*
* Set a bit in a control register of all cpus
*/
-void smp_ctl_set_bit(int cr, int bit) {
- ec_creg_mask_parms parms;
+void smp_ctl_set_bit(int cr, int bit)
+{
+ struct ec_creg_mask_parms parms;
- parms.start_ctl = cr;
- parms.end_ctl = cr;
+ memset(&parms.orvals, 0, sizeof(parms.orvals));
+ memset(&parms.andvals, 0xff, sizeof(parms.andvals));
parms.orvals[cr] = 1 << bit;
- parms.andvals[cr] = -1L;
- preempt_disable();
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_set_bit(cr, bit);
- preempt_enable();
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
* Clear a bit in a control register of all cpus
*/
-void smp_ctl_clear_bit(int cr, int bit) {
- ec_creg_mask_parms parms;
+void smp_ctl_clear_bit(int cr, int bit)
+{
+ struct ec_creg_mask_parms parms;
- parms.start_ctl = cr;
- parms.end_ctl = cr;
- parms.orvals[cr] = 0;
+ memset(&parms.orvals, 0, sizeof(parms.orvals));
+ memset(&parms.andvals, 0xff, sizeof(parms.andvals));
parms.andvals[cr] = ~(1L << bit);
- preempt_disable();
- smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
- __ctl_clear_bit(cr, bit);
- preempt_enable();
+ on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
/*
@@ -650,9 +640,9 @@ __cpu_up(unsigned int cpu)
sf->gprs[9] = (unsigned long) sf;
cpu_lowcore->save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
- __asm__ __volatile__("stam 0,15,0(%0)"
- : : "a" (&cpu_lowcore->access_regs_save_area)
- : "memory");
+ asm volatile(
+ " stam 0,15,0(%0)"
+ : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
cpu_lowcore->current_task = (unsigned long) idle;
cpu_lowcore->cpu_data.cpu_nr = cpu;
@@ -708,7 +698,7 @@ int
__cpu_disable(void)
{
unsigned long flags;
- ec_creg_mask_parms cr_parms;
+ struct ec_creg_mask_parms cr_parms;
int cpu = smp_processor_id();
spin_lock_irqsave(&smp_reserve_lock, flags);
@@ -724,30 +714,21 @@ __cpu_disable(void)
pfault_fini();
#endif
- /* disable all external interrupts */
+ memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
+ memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
- cr_parms.start_ctl = 0;
- cr_parms.end_ctl = 0;
+ /* disable all external interrupts */
cr_parms.orvals[0] = 0;
cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
1<<11 | 1<<10 | 1<< 6 | 1<< 4);
- smp_ctl_bit_callback(&cr_parms);
-
/* disable all I/O interrupts */
-
- cr_parms.start_ctl = 6;
- cr_parms.end_ctl = 6;
cr_parms.orvals[6] = 0;
cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
1<<27 | 1<<26 | 1<<25 | 1<<24);
- smp_ctl_bit_callback(&cr_parms);
-
/* disable most machine checks */
-
- cr_parms.start_ctl = 14;
- cr_parms.end_ctl = 14;
cr_parms.orvals[14] = 0;
cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+
smp_ctl_bit_callback(&cr_parms);
spin_unlock_irqrestore(&smp_reserve_lock, flags);
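With the rework above, ec_creg_mask_parms always carries a full set of OR and AND masks and smp_ctl_bit_callback touches all 16 control registers, so the old start_ctl/end_ctl window is gone. A usage sketch mirroring what smp_ctl_set_bit now does:

	/* Set bit 4 of control register 6 on every CPU (sketch). */
	static void set_cr6_bit4_everywhere(void)
	{
		struct ec_creg_mask_parms parms;

		memset(&parms.orvals, 0, sizeof(parms.orvals));	     /* OR in nothing */
		memset(&parms.andvals, 0xff, sizeof(parms.andvals)); /* keep every bit */
		parms.orvals[6] = 1UL << 4;	/* except this one, forced on */
		on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
	}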
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 74e6178fbaf2..4bf66cc4a267 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -53,8 +53,6 @@ static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;
-extern unsigned long wall_jiffies;
-
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -87,9 +85,8 @@ static inline unsigned long do_gettimeoffset(void)
{
__u64 now;
- now = (get_clock() - jiffies_timer_cc) >> 12;
- /* We require the offset from the latest update of xtime */
- now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+ now = (get_clock() - jiffies_timer_cc) >> 12;
+ now -= (__u64) jiffies * USECS_PER_JIFFY;
return (unsigned long) now;
}
@@ -166,7 +163,7 @@ EXPORT_SYMBOL(do_settimeofday);
void account_ticks(struct pt_regs *regs)
{
__u64 tmp;
- __u32 ticks, xticks;
+ __u32 ticks;
/* Calculate how many ticks have passed. */
if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
@@ -204,6 +201,7 @@ void account_ticks(struct pt_regs *regs)
*/
write_seqlock(&xtime_lock);
if (S390_lowcore.jiffy_timer > xtime_cc) {
+ __u32 xticks;
tmp = S390_lowcore.jiffy_timer - xtime_cc;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
@@ -212,13 +210,11 @@ void account_ticks(struct pt_regs *regs)
xticks = 1;
xtime_cc += CLK_TICKS_PER_JIFFY;
}
- while (xticks--)
- do_timer(regs);
+ do_timer(xticks);
}
write_sequnlock(&xtime_lock);
#else
- for (xticks = ticks; xticks > 0; xticks--)
- do_timer(regs);
+ do_timer(ticks);
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -351,10 +347,12 @@ void __init time_init(void)
int cc;
/* kick the TOD clock */
- asm volatile ("STCK 0(%1)\n\t"
- "IPM %0\n\t"
- "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
- : "memory", "cc");
+ asm volatile(
+ " stck 0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "=m" (init_timer_cc)
+ : "a" (&init_timer_cc) : "cc");
switch (cc) {
case 0: /* clock in set state: all is fine */
break;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c4982c963424..3eb4fab048b8 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -597,8 +597,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
local_irq_enable();
if (MACHINE_HAS_IEEE)
- __asm__ volatile ("stfpc %0\n\t"
- : "=m" (current->thread.fp_regs.fpc));
+ asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
else if (regs->psw.mask & PSW_MASK_PSTATE) {
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index c42ffedfdb49..b0cfa6c4883d 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -5,5 +5,6 @@
EXTRA_AFLAGS := -traditional
lib-y += delay.o string.o uaccess_std.o
+lib-$(CONFIG_32BIT) += div64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 468f4ea33f99..027c4742a001 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -27,9 +27,7 @@ void __delay(unsigned long loops)
* yield the megahertz number of the cpu. The important function
* is udelay and that is done using the tod clock. -- martin.
*/
- __asm__ __volatile__(
- "0: brct %0,0b"
- : /* no outputs */ : "r" ((loops/2) + 1));
+ asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
}
/*
@@ -38,13 +36,12 @@ void __delay(unsigned long loops)
*/
void __udelay(unsigned long usecs)
{
- uint64_t start_cc, end_cc;
+ uint64_t start_cc;
if (usecs == 0)
return;
- asm volatile ("STCK %0" : "=m" (start_cc));
+ start_cc = get_clock();
do {
cpu_relax();
- asm volatile ("STCK %0" : "=m" (end_cc));
- } while (((end_cc - start_cc)/4096) < usecs);
+ } while (((get_clock() - start_cc)/4096) < usecs);
}
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
new file mode 100644
index 000000000000..0481f3424a13
--- /dev/null
+++ b/arch/s390/lib/div64.c
@@ -0,0 +1,151 @@
+/*
+ * arch/s390/lib/div64.c
+ *
+ * __div64_32 implementation for 31 bit.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_MARCH_G5
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 31 bit integer using signed 64/32 bit division.
+ */
+static uint32_t __div64_31(uint64_t *n, uint32_t base)
+{
+ register uint32_t reg2 asm("2");
+ register uint32_t reg3 asm("3");
+ uint32_t *words = (uint32_t *) n;
+ uint32_t tmp;
+
+ /* Special case base==1, remainder = 0, quotient = n */
+ if (base == 1)
+ return 0;
+ /*
+ * The special case base==0 would cause a fixed point divide
+ * exception on the dr instruction and must not happen anyway.
+ * For the following calculation we can assume base > 1. The
+ * first signed 64 / 32 bit division with an upper half of 0
+ * will give the correct upper half of the 64 bit quotient.
+ */
+ reg2 = 0UL;
+ reg3 = words[0];
+ asm volatile(
+ " dr %0,%2\n"
+ : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+ words[0] = reg3;
+ reg3 = words[1];
+ /*
+ * To get the lower half of the 64 bit quotient and the 32 bit
+ * remainder we have to use a little trick. Since we only have
+ * a signed division the quotient can get too big. To avoid this
+ * the 64 bit dividend is halved, then the signed division will
+ * work. Afterwards the quotient and the remainder are doubled.
+ * If the last bit of the dividend was one, the remainder is
+ * increased by one and then checked against the base. If the
+ * remainder has overflowed, subtract base and increase the
+ * quotient. Simple, no?
+ */
+ asm volatile(
+ " nr %2,%1\n"
+ " srdl %0,1\n"
+ " dr %0,%3\n"
+ " alr %0,%0\n"
+ " alr %1,%1\n"
+ " alr %0,%2\n"
+ " clr %0,%3\n"
+ " jl 0f\n"
+ " slr %0,%3\n"
+ " alr %1,%2\n"
+ "0:\n"
+ : "+d" (reg2), "+d" (reg3), "=d" (tmp)
+ : "d" (base), "2" (1UL) : "cc" );
+ words[1] = reg3;
+ return reg2;
+}
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 32 bit integer using the unsigned 64/31 bit division.
+ */
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+ uint32_t r;
+
+ /*
+ * If the most significant bit of base is set, divide n by
+ * (base/2). That allows us to use 64/31 bit division and gives
+ * a good approximation of the result: n = (base/2)*q + r. The
+ * result needs to be corrected with two simple transformations.
+ * If base is already < 2^31, __div64_31 can be used directly.
+ */
+ r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
+ if ((signed) base < 0) {
+ uint64_t q = *n;
+ /*
+ * First transformation:
+ * n = (base/2)*q + r
+ * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
+ * Since r < (base/2), r + (base/2) < base.
+ * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
+ * n = ((base/2)*2)*q1 + r1 with r1 < base.
+ */
+ if (q & 1)
+ r += base/2;
+ q >>= 1;
+ /*
+ * Second transformation. ((base/2)*2) could have lost the
+ * last bit.
+ * n = ((base/2)*2)*q1 + r1
+ * = base*q1 - ((base&1) ? q1 : 0) + r1
+ */
+ if (base & 1) {
+ int64_t rx = r - q;
+ /*
+ * base is >= 2^31. The worst case for the while
+ * loop is n=2^64-1 base=2^31+1. That gives a
+ * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
+ * base >= 2^31 the loop is finished after a maximum
+ * of three iterations.
+ */
+ while (rx < 0) {
+ rx += base;
+ q--;
+ }
+ r = rx;
+ }
+ *n = q;
+ }
+ return r;
+}
+
+#else /* MARCH_G5 */
+
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+ register uint32_t reg2 asm("2");
+ register uint32_t reg3 asm("3");
+ uint32_t *words = (uint32_t *) n;
+
+ reg2 = 0UL;
+ reg3 = words[0];
+ asm volatile(
+ " dlr %0,%2\n"
+ : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+ words[0] = reg3;
+ reg3 = words[1];
+ asm volatile(
+ " dlr %0,%2\n"
+ : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+ words[1] = reg3;
+ return reg2;
+}
+
+#endif /* MARCH_G5 */
+
+EXPORT_SYMBOL(__div64_32);
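The halving trick in __div64_31 is easy to convince yourself of in portable C: with m = n/2 and m = q*base + r, we have n = 2q*base + (2r + (n & 1)), and since 2r + 1 < 2*base at most one correction step is needed. A host-side model using 64 bit arithmetic (for checking the algebra only; the kernel code performs the same steps on the low 32 bit word with dr/alr/clr/slr):

#include <stdint.h>

/* Model: divide n by base (1 < base < 2^31) via a signed divide
 * of the halved dividend, then double and correct. */
static uint32_t div_by_halving(uint64_t n, uint32_t base, uint64_t *quot)
{
	uint64_t q = (int64_t)(n >> 1) / (int32_t)base;	/* cannot overflow */
	uint64_t r = (n >> 1) - q * base;

	q <<= 1;			/* double the quotient */
	r = (r << 1) + (n & 1);		/* double remainder, re-add low bit */
	if (r >= base) {		/* the "clr %0,%3" check in the asm */
		r -= base;
		q++;
	}
	*quot = q;
	return (uint32_t)r;
}

/* e.g. n = 0xffffffff, base = 3 gives quot = 0x55555555, rem = 0 */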
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index b9b7958a226a..8d76403fcf89 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -24,57 +24,76 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
-static inline void
-_diag44(void)
+static inline void _raw_yield(void)
{
-#ifdef CONFIG_64BIT
if (MACHINE_HAS_DIAG44)
-#endif
asm volatile("diag 0,0,0x44");
}
-void
-_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+static inline void _raw_yield_cpu(int cpu)
+{
+ if (MACHINE_HAS_DIAG9C)
+ asm volatile("diag %0,0,0x9c"
+ : : "d" (__cpu_logical_map[cpu]));
+ else
+ _raw_yield();
+}
+
+void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
{
int count = spin_retry;
+ unsigned int cpu = ~smp_processor_id();
while (1) {
if (count-- <= 0) {
- _diag44();
+ unsigned int owner = lp->owner_cpu;
+ if (owner != 0)
+ _raw_yield_cpu(~owner);
count = spin_retry;
}
if (__raw_spin_is_locked(lp))
continue;
- if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+ if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+ lp->owner_pc = pc;
return;
+ }
}
}
EXPORT_SYMBOL(_raw_spin_lock_wait);
-int
-_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
{
- int count = spin_retry;
+ unsigned int cpu = ~smp_processor_id();
+ int count;
- while (count-- > 0) {
+ for (count = spin_retry; count > 0; count--) {
if (__raw_spin_is_locked(lp))
continue;
- if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+ if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+ lp->owner_pc = pc;
return 1;
+ }
}
return 0;
}
EXPORT_SYMBOL(_raw_spin_trylock_retry);
-void
-_raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_spin_relax(raw_spinlock_t *lock)
+{
+ unsigned int cpu = lock->owner_cpu;
+ if (cpu != 0)
+ _raw_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(_raw_spin_relax);
+
+void _raw_read_lock_wait(raw_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _diag44();
+ _raw_yield();
count = spin_retry;
}
if (!__raw_read_can_lock(rw))
@@ -86,8 +105,7 @@ _raw_read_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-int
-_raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(raw_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -103,14 +121,13 @@ _raw_read_trylock_retry(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void
-_raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(raw_rwlock_t *rw)
{
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _diag44();
+ _raw_yield();
count = spin_retry;
}
if (!__raw_write_can_lock(rw))
@@ -121,8 +138,7 @@ _raw_write_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-int
-_raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(raw_rwlock_t *rw)
{
int count = spin_retry;
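The larger change in this file is the lock word itself: it now holds the bit-inverted owner CPU number instead of the caller's PC (which moves to owner_pc), so a waiter that exhausts spin_retry can direct a diag 0x9c yield hint at exactly the CPU holding the lock, falling back to the undirected diag 0x44. Inverting the CPU number keeps CPU 0 distinct from the unlocked value 0. A portable model of the acquire path, with a GCC builtin standing in for _raw_compare_and_swap:

struct raw_spinlock_model {
	volatile unsigned int owner_cpu;	/* 0 == unlocked */
	unsigned int owner_pc;
};

extern int spin_retry;
extern unsigned int this_cpu(void);
extern void yield_to_cpu(unsigned int cpu);	/* diag 0x9c wrapper, assumed */

static void spin_lock_wait_model(struct raw_spinlock_model *lp, unsigned int pc)
{
	unsigned int cpu = ~this_cpu();		/* nonzero even for CPU 0 */
	int count = spin_retry;

	for (;;) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;

			if (owner != 0)
				yield_to_cpu(~owner);	/* undo the inversion */
			count = spin_retry;
		}
		if (lp->owner_cpu != 0)
			continue;		/* still held, keep spinning */
		if (__sync_val_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
			lp->owner_pc = pc;	/* debug info, set after acquire */
			return;
		}
	}
}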
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 86c96d6c191a..121b2935a422 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -35,7 +35,7 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
tmp1 = -4096UL;
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
- " jz 4f\n"
+ " jz 7f\n"
"1:"ALR" %0,%3\n"
" "SLR" %1,%3\n"
" "SLR" %2,%3\n"
@@ -44,13 +44,23 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
" "SLR" %4,%1\n"
" "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
+ " jnh 4f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
" "SLR" %0,%4\n"
- " j 5f\n"
- "4:"SLR" %0,%0\n"
- "5: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ " "ALR" %2,%4\n"
+ "4:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,6f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "5: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "6:"AHI" %4,-256\n"
+ " jnm 5b\n"
+ " ex %4,0(%3)\n"
+ " j 8f\n"
+ "7:"SLR" %0,%0\n"
+ "8: \n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
return size;
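The new labels 4 through 8 are not cosmetic: they implement the copy_from_user() contract that on a fault the uncopied tail of the kernel destination must be zeroed instead of being left with stale data. Reduced to C, the fixup path now behaves like this sketch (the function still returns the number of bytes not copied):

#include <stddef.h>
#include <string.h>

/* Model of the fault fixup: n bytes requested, 'remaining' faulted. */
static size_t copy_from_user_fixup_model(void *dst, size_t n, size_t remaining)
{
	if (remaining)	/* zero what the user copy never reached */
		memset((char *)dst + (n - remaining), 0, remaining);
	return remaining;
}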
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 9a4d4a29ea79..f44f0078b354 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -35,25 +35,35 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
tmp1 = -256UL;
asm volatile(
"0: mvcp 0(%0,%2),0(%1),%3\n"
- " jz 5f\n"
+ " jz 8f\n"
"1:"ALR" %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
" jnz 1b\n"
- " j 5f\n"
+ " j 8f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" "LHI" %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" "SLR" %4,%1\n"
" "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
+ " jnh 5f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n"
" "SLR" %0,%4\n"
- " j 6f\n"
- "5:"SLR" %0,%0\n"
- "6: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ " "ALR" %2,%4\n"
+ "5:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,7f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "6: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "7:"AHI" %4,-256\n"
+ " jnm 6b\n"
+ " ex %4,0(%3)\n"
+ " j 9f\n"
+ "8:"SLR" %0,%0\n"
+ "9: \n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
return size;
@@ -67,16 +77,22 @@ size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
asm volatile(
"0: mvcp 0(%0,%2),0(%1),%3\n"
" "SLR" %0,%0\n"
- " j 3f\n"
+ " j 5f\n"
"1: la %4,255(%1)\n" /* %4 = ptr + 255 */
" "LHI" %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" "SLR" %4,%1\n"
" "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 3f\n"
+ " jnh 5f\n"
"2: mvcp 0(%4,%2),0(%1),%3\n"
" "SLR" %0,%4\n"
- "3:\n"
+ " "ALR" %2,%4\n"
+ "3:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,4f\n"
+ " xc 0(1,%2),0(%2)\n"
+ "4: ex %4,0(%3)\n"
+ "5:\n"
EX_TABLE(0b,1b) EX_TABLE(2b,3b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index b4957c84e4d6..6b9aec5a2c18 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -1564,52 +1564,52 @@ static int emu_tceb (struct pt_regs *regs, int rx, long val) {
}
static inline void emu_load_regd(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
+ if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
return;
- asm volatile ( /* load reg from fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " ld 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
- : "1" );
+ asm volatile( /* load reg from fp_regs.fprs[reg] */
+ " bras 1,0f\n"
+ " ld 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
+ : "1");
}
static inline void emu_load_rege(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
+ if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
return;
- asm volatile ( /* load reg from fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " le 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
- : "1" );
+ asm volatile( /* load reg from fp_regs.fprs[reg] */
+ " bras 1,0f\n"
+ " le 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
+ : "1");
}
static inline void emu_store_regd(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
+ if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
return;
- asm volatile ( /* store reg to fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " std 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
- : "1" );
+ asm volatile( /* store reg to fp_regs.fprs[reg] */
+ " bras 1,0f\n"
+ " std 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
+ : "1");
}
static inline void emu_store_rege(int reg) {
- if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
+ if ((reg&9) != 0) /* test if reg in {0,2,4,6} */
return;
- asm volatile ( /* store reg to fp_regs.fprs[reg] */
- " bras 1,0f\n"
- " ste 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
- : "1" );
+ asm volatile( /* store reg to fp_regs.fprs[reg] */
+ " bras 1,0f\n"
+ " ste 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
+ : "1");
}
int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
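All of these emu_* helpers lean on the same trick, which the reindent makes easier to see: "ex %0,0(1)" executes the ld/le/std/ste template behind the bras, OR-ing the low byte of the register operand (reg<<4) into byte 1 of the target instruction. Byte 1 of an RX instruction holds the R1 field in its high nibble, so one template serves all four even floating point registers. Illustrated on the encoding:

/* Sketch: what "ex %0,0(1)" with %0 == reg<<4 does to the template.
 * "ld 0,0(1)" encodes as opcode 0x68, then R1|X2, then B2|D2. */
static void patch_r1_field(unsigned char insn[4], int reg)
{
	/* insn == { 0x68, 0x00, 0x10, 0x00 } is "ld 0,0(1)" */
	insn[1] |= (reg << 4) & 0xf0;	/* what ex ORs in at runtime */
	/* insn now encodes "ld reg,0(1)" for reg in {0,2,4,6} */
}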
@@ -2089,23 +2089,22 @@ int math_emu_ldr(__u8 *opcode) {
if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
/* we got an exception, therefore ry can't be in {0,2,4,6} */
- __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */
- " bras 1,0f\n"
- " ld 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (opc & 0xf0),
- "a" (&fp_regs->fprs[opc & 0xf].d)
- : "1" );
+ asm volatile( /* load rx from fp_regs.fprs[ry] */
+ " bras 1,0f\n"
+ " ld 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
+ : "1");
} else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
- __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */
- " bras 1,0f\n"
- " std 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" ((opc & 0xf) << 4),
- "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
- : "1" );
+ asm volatile( /* store ry to fp_regs.fprs[rx] */
+ " bras 1,0f\n"
+ " std 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" ((opc & 0xf) << 4),
+ "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
+ : "1");
} else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
return 0;
@@ -2120,23 +2119,22 @@ int math_emu_ler(__u8 *opcode) {
if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */
/* we got an exception, therefore ry can't be in {0,2,4,6} */
- __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */
- " bras 1,0f\n"
- " le 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" (opc & 0xf0),
- "a" (&fp_regs->fprs[opc & 0xf].f)
- : "1" );
+ asm volatile( /* load rx from fp_regs.fprs[ry] */
+ " bras 1,0f\n"
+ " le 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
+ : "1");
} else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */
- __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */
- " bras 1,0f\n"
- " ste 0,0(%1)\n"
- "0: ex %0,0(1)"
- : /* no output */
- : "a" ((opc & 0xf) << 4),
- "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
- : "1" );
+ asm volatile( /* store ry to fp_regs.fprs[rx] */
+ " bras 1,0f\n"
+ " ste 0,0(%1)\n"
+ "0: ex %0,0(1)"
+ : /* no output */
+ : "a" ((opc & 0xf) << 4),
+ "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
+ : "1");
} else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
return 0;
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h
index ab556b600f73..5b6ca4570ea4 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/arch/s390/math-emu/sfp-util.h
@@ -4,48 +4,51 @@
#include <asm/byteorder.h>
#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
- unsigned int __sh = (ah); \
- unsigned int __sl = (al); \
- __asm__ (" alr %1,%3\n" \
- " brc 12,0f\n" \
- " ahi %0,1\n" \
- "0: alr %0,%2" \
- : "+&d" (__sh), "+d" (__sl) \
- : "d" (bh), "d" (bl) : "cc" ); \
- (sh) = __sh; \
- (sl) = __sl; \
+ unsigned int __sh = (ah); \
+ unsigned int __sl = (al); \
+ asm volatile( \
+ " alr %1,%3\n" \
+ " brc 12,0f\n" \
+ " ahi %0,1\n" \
+ "0: alr %0,%2" \
+ : "+&d" (__sh), "+d" (__sl) \
+ : "d" (bh), "d" (bl) : "cc"); \
+ (sh) = __sh; \
+ (sl) = __sl; \
})
#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
- unsigned int __sh = (ah); \
- unsigned int __sl = (al); \
- __asm__ (" slr %1,%3\n" \
- " brc 3,0f\n" \
- " ahi %0,-1\n" \
- "0: slr %0,%2" \
- : "+&d" (__sh), "+d" (__sl) \
- : "d" (bh), "d" (bl) : "cc" ); \
- (sh) = __sh; \
- (sl) = __sl; \
+ unsigned int __sh = (ah); \
+ unsigned int __sl = (al); \
+ asm volatile( \
+ " slr %1,%3\n" \
+ " brc 3,0f\n" \
+ " ahi %0,-1\n" \
+ "0: slr %0,%2" \
+ : "+&d" (__sh), "+d" (__sl) \
+ : "d" (bh), "d" (bl) : "cc"); \
+ (sh) = __sh; \
+ (sl) = __sl; \
})
/* a umul b = a mul b + ((a >= 1<<31) ? b<<32 : 0) + ((b >= 1<<31) ? a<<32 : 0) */
#define umul_ppmm(wh, wl, u, v) ({ \
- unsigned int __wh = u; \
- unsigned int __wl = v; \
- __asm__ (" ltr 1,%0\n" \
- " mr 0,%1\n" \
- " jnm 0f\n" \
- " alr 0,%1\n" \
- "0: ltr %1,%1\n" \
- " jnm 1f\n" \
- " alr 0,%0\n" \
- "1: lr %0,0\n" \
- " lr %1,1\n" \
- : "+d" (__wh), "+d" (__wl) \
- : : "0", "1", "cc" ); \
- wh = __wh; \
- wl = __wl; \
+ unsigned int __wh = u; \
+ unsigned int __wl = v; \
+ asm volatile( \
+ " ltr 1,%0\n" \
+ " mr 0,%1\n" \
+ " jnm 0f\n" \
+ " alr 0,%1\n" \
+ "0: ltr %1,%1\n" \
+ " jnm 1f\n" \
+ " alr 0,%0\n" \
+ "1: lr %0,0\n" \
+ " lr %1,1\n" \
+ : "+d" (__wh), "+d" (__wl) \
+ : : "0", "1", "cc"); \
+ wh = __wh; \
+ wl = __wl; \
})
#define udiv_qrnnd(q, r, n1, n0, d) \
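For readers who do not speak s/390 assembler: these macros are the standard GMP/soft-float primitives, and the conversion above changes only formatting and the asm spelling. Their meaning, expressed with 64 bit host arithmetic as a verification model:

#include <stdint.h>

/* add_ssaaaa: (sh:sl) = (ah:al) + (bh:bl), with the carry out of
 * the low 32 bit word propagated into the high word (the brc 12 /
 * ahi pair in the asm). */
static void add_ssaaaa_model(uint32_t *sh, uint32_t *sl,
			     uint32_t ah, uint32_t al,
			     uint32_t bh, uint32_t bl)
{
	uint64_t s = (((uint64_t)ah << 32) | al) + (((uint64_t)bh << 32) | bl);

	*sh = (uint32_t)(s >> 32);
	*sl = (uint32_t)s;
}

/* umul_ppmm: (wh:wl) = u * v, a full 32 x 32 -> 64 bit unsigned
 * product; the asm builds it from the signed "mr" plus the two
 * conditional fixups described in the comment above. */
static void umul_ppmm_model(uint32_t *wh, uint32_t *wl,
			    uint32_t u, uint32_t v)
{
	uint64_t w = (uint64_t)u * v;

	*wh = (uint32_t)(w >> 32);
	*wl = (uint32_t)w;
}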
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9b11e3e20903..226275d5c4f6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -142,17 +142,17 @@ dcss_diag (__u8 func, void *parameter,
rx = (unsigned long) parameter;
ry = (unsigned long) func;
- __asm__ __volatile__(
+ asm volatile(
#ifdef CONFIG_64BIT
- " sam31\n" // switch to 31 bit
- " diag %0,%1,0x64\n"
- " sam64\n" // switch back to 64 bit
+ " sam31\n"
+ " diag %0,%1,0x64\n"
+ " sam64\n"
#else
- " diag %0,%1,0x64\n"
+ " diag %0,%1,0x64\n"
#endif
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+ " ipm %2\n"
+ " srl %2,28\n"
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
*ret1 = rx;
*ret2 = ry;
return rc;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 44f0cda7e72e..9c3c19fe62fc 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -353,8 +353,9 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (tsk->pid == 1) {
+ if (is_init(tsk)) {
yield();
+ down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", tsk->comm);
@@ -423,20 +424,13 @@ int pfault_init(void)
if (pfault_disable)
return -1;
- __asm__ __volatile__(
- " diag %1,%0,0x258\n"
- "0: j 2f\n"
- "1: la %0,8\n"
+ asm volatile(
+ " diag %1,%0,0x258\n"
+ "0: j 2f\n"
+ "1: la %0,8\n"
"2:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
-#ifndef CONFIG_64BIT
- " .long 0b,1b\n"
-#else /* CONFIG_64BIT */
- " .quad 0b,1b\n"
-#endif /* CONFIG_64BIT */
- ".previous"
- : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
+ EX_TABLE(0b,1b)
+ : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
__ctl_set_bit(0, 9);
return rc;
}
@@ -449,18 +443,11 @@ void pfault_fini(void)
if (pfault_disable)
return;
__ctl_clear_bit(0,9);
- __asm__ __volatile__(
- " diag %0,0,0x258\n"
+ asm volatile(
+ " diag %0,0,0x258\n"
"0:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
-#ifndef CONFIG_64BIT
- " .long 0b,0b\n"
-#else /* CONFIG_64BIT */
- " .quad 0b,0b\n"
-#endif /* CONFIG_64BIT */
- ".previous"
- : : "a" (&refbk), "m" (refbk) : "cc" );
+ EX_TABLE(0b,0b)
+ : : "a" (&refbk), "m" (refbk) : "cc");
}
asmlinkage void
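EX_TABLE produces exactly the exception table entry the removed open-coded ".section __ex_table" blocks did; centralizing it in a macro settles the 31/64 bit pointer width and alignment in one place. Reconstructed from the code being deleted here, it expands along these lines (treat the exact spelling as an assumption; at the time the macro lived in the s390 uaccess header):

#ifndef CONFIG_64BIT
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align	4\n"				\
	"	.long	" #_fault "," #_target "\n"	\
	".previous\n"
#else
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align	8\n"				\
	"	.quad	" #_fault "," #_target "\n"	\
	".previous\n"
#endif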
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f7a523..127044e1707c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
{
if (addr >= 0x7ff00000)
return;
+ asm volatile(
#ifdef CONFIG_64BIT
- asm volatile (
- " sam31\n"
- " diag %0,%0,0x10\n"
- "0: sam64\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b, 0b\n"
- ".previous\n"
- : : "a" (addr));
+ " sam31\n"
+ " diag %0,%0,0x10\n"
+ "0: sam64\n"
#else
- asm volatile (
- " diag %0,%0,0x10\n"
+ " diag %0,%0,0x10\n"
"0:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b, 0b\n"
- ".previous\n"
- : : "a" (addr));
#endif
+ EX_TABLE(0b,0b)
+ : : "a" (addr));
}
void show_mem(void)
@@ -156,11 +147,10 @@ void __init paging_init(void)
S390_lowcore.kernel_asce = pgdir_k;
/* enable virtual mapping in kernel mode */
- __asm__ __volatile__(" LCTL 1,1,%0\n"
- " LCTL 7,7,%0\n"
- " LCTL 13,13,%0\n"
- " SSM %1"
- : : "m" (pgdir_k), "m" (ssm_mask));
+ __ctl_load(pgdir_k, 1, 1);
+ __ctl_load(pgdir_k, 7, 7);
+ __ctl_load(pgdir_k, 13, 13);
+ __raw_local_irq_ssm(ssm_mask);
local_flush_tlb();
return;
@@ -241,11 +231,10 @@ void __init paging_init(void)
S390_lowcore.kernel_asce = pgdir_k;
/* enable virtual mapping in kernel mode */
- __asm__ __volatile__("lctlg 1,1,%0\n\t"
- "lctlg 7,7,%0\n\t"
- "lctlg 13,13,%0\n\t"
- "ssm %1"
- : :"m" (pgdir_k), "m" (ssm_mask));
+ __ctl_load(pgdir_k, 1, 1);
+ __ctl_load(pgdir_k, 7, 7);
+ __ctl_load(pgdir_k, 13, 13);
+ __raw_local_irq_ssm(ssm_mask);
local_flush_tlb();
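Both paging_init() variants now go through __ctl_load() and __raw_local_irq_ssm() instead of open-coded lctl/lctlg and ssm, so the 31 and 64 bit paths share one audited implementation. A sketch of the 64 bit control register load under the usual constraints (the real macro additionally describes the array as a memory input so the compiler cannot reorder the store it depends on):

/* Sketch: load control registers low..high from 'array'.
 * low/high must be compile time constants; 64 bit variant (lctlg). */
#define __ctl_load_sketch(array, low, high) ({			\
	asm volatile("	lctlg	%1,%2,0(%0)"			\
		     : : "a" (&(array)), "i" (low), "i" (high)	\
		     : "memory");				\
})

/* usage, as in the hunk: __ctl_load_sketch(pgdir_k, 13, 13); */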