Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/cmpxchg.h        | 240
-rw-r--r--  arch/s390/include/asm/cputime.h        |  46
-rw-r--r--  arch/s390/include/asm/debug.h          |  29
-rw-r--r--  arch/s390/include/asm/ftrace.h         |  54
-rw-r--r--  arch/s390/include/asm/idle.h           |   3
-rw-r--r--  arch/s390/include/asm/io.h             |   9
-rw-r--r--  arch/s390/include/asm/irq.h            |  11
-rw-r--r--  arch/s390/include/asm/kprobes.h        |   1
-rw-r--r--  arch/s390/include/asm/lowcore.h        |   4
-rw-r--r--  arch/s390/include/asm/pci.h            |   5
-rw-r--r--  arch/s390/include/asm/pci_io.h         |   6
-rw-r--r--  arch/s390/include/asm/pgalloc.h        |   2
-rw-r--r--  arch/s390/include/asm/pgtable.h        |  33
-rw-r--r--  arch/s390/include/asm/processor.h      |   2
-rw-r--r--  arch/s390/include/asm/spinlock.h       |   9
-rw-r--r--  arch/s390/include/asm/tlb.h            |   1
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h    |   4
17 files changed, 176 insertions(+), 283 deletions(-)
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4236408070e5..6259895fcd97 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,200 +11,28 @@
#include <linux/types.h>
#include <linux/bug.h>
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
-{
- unsigned long addr, old;
- int shift;
-
- switch (size) {
- case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
- asm volatile(
- " l %0,%4\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%4\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) addr)
- : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
- "Q" (*(int *) addr) : "memory", "cc", "0");
- return old >> shift;
- case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
- asm volatile(
- " l %0,%4\n"
- "0: lr 0,%0\n"
- " nr 0,%3\n"
- " or 0,%2\n"
- " cs %0,0,%4\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) addr)
- : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
- "Q" (*(int *) addr) : "memory", "cc", "0");
- return old >> shift;
- case 4:
- asm volatile(
- " l %0,%3\n"
- "0: cs %0,%2,%3\n"
- " jl 0b\n"
- : "=&d" (old), "=Q" (*(int *) ptr)
- : "d" (x), "Q" (*(int *) ptr)
- : "memory", "cc");
- return old;
-#ifdef CONFIG_64BIT
- case 8:
- asm volatile(
- " lg %0,%3\n"
- "0: csg %0,%2,%3\n"
- " jl 0b\n"
- : "=&d" (old), "=m" (*(long *) ptr)
- : "d" (x), "Q" (*(long *) ptr)
- : "memory", "cc");
- return old;
-#endif /* CONFIG_64BIT */
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-#define xchg(ptr, x) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
- __ret; \
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __o = (o); \
+ __typeof__(*(ptr)) __n = (n); \
+ (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
})
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG
-
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long addr, prev, tmp;
- int shift;
-
- switch (size) {
- case 1:
- addr = (unsigned long) ptr;
- shift = (3 ^ (addr & 3)) << 3;
- addr ^= addr & 3;
- asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
- "1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
- : "d" ((old & 0xff) << shift),
- "d" ((new & 0xff) << shift),
- "d" (~(0xff << shift))
- : "memory", "cc");
- return prev >> shift;
- case 2:
- addr = (unsigned long) ptr;
- shift = (2 ^ (addr & 2)) << 3;
- addr ^= addr & 2;
- asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
- "1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
- : "d" ((old & 0xffff) << shift),
- "d" ((new & 0xffff) << shift),
- "d" (~(0xffff << shift))
- : "memory", "cc");
- return prev >> shift;
- case 4:
- asm volatile(
- " cs %0,%3,%1\n"
- : "=&d" (prev), "=Q" (*(int *) ptr)
- : "0" (old), "d" (new), "Q" (*(int *) ptr)
- : "memory", "cc");
- return prev;
-#ifdef CONFIG_64BIT
- case 8:
- asm volatile(
- " csg %0,%3,%1\n"
- : "=&d" (prev), "=Q" (*(long *) ptr)
- : "0" (old), "d" (new), "Q" (*(long *) ptr)
- : "memory", "cc");
- return prev;
-#endif /* CONFIG_64BIT */
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
- sizeof(*(ptr))); \
- __ret; \
-})
+#define cmpxchg64 cmpxchg
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg
-#ifdef CONFIG_64BIT
-#define cmpxchg64(ptr, o, n) \
+#define xchg(ptr, x) \
({ \
- cmpxchg((ptr), (o), (n)); \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old; \
+ do { \
+ __old = *__ptr; \
+ } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
+ __old; \
})
-#else /* CONFIG_64BIT */
-static inline unsigned long long __cmpxchg64(void *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- register_pair rp_old = {.pair = old};
- register_pair rp_new = {.pair = new};
- unsigned long long *ullptr = ptr;
- asm volatile(
- " cds %0,%2,%1"
- : "+d" (rp_old), "+Q" (*ullptr)
- : "d" (rp_new)
- : "memory", "cc");
- return rp_old.pair;
-}
-
-#define cmpxchg64(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg64((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)); \
- __ret; \
-})
-#endif /* CONFIG_64BIT */
+#define __HAVE_ARCH_CMPXCHG
#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
({ \
@@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
#define system_has_cmpxchg_double() 1
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(void *ptr,
- unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- case 2:
- case 4:
-#ifdef CONFIG_64BIT
- case 8:
-#endif
- return __cmpxchg(ptr, old, new, size);
- default:
- return __cmpxchg_local_generic(ptr, old, new, size);
- }
-
- return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg_local((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- __ret; \
-})
-
-#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
-
#endif /* __ASM_CMPXCHG_H */
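The rewritten cmpxchg.h above drops the hand-coded CS/CSG assembly loops and builds cmpxchg() and xchg() on GCC's __sync_* atomic builtins, which emit equivalent compare-and-swap sequences. Below is a minimal user-space sketch of the same pattern, assuming plain unsigned long operands; the names my_xchg and counter are illustrative only, not part of the header.

/*
 * Minimal user-space sketch (not kernel code) of the pattern the new
 * macros rely on: GCC's __sync builtins stand in for the removed
 * hand-written CS/CSG loops.
 */
#include <stdio.h>

static unsigned long counter = 5;

static unsigned long my_xchg(unsigned long *ptr, unsigned long new)
{
	unsigned long old;

	do {			/* retry until the compare-and-swap succeeds */
		old = *ptr;
	} while (!__sync_bool_compare_and_swap(ptr, old, new));
	return old;
}

int main(void)
{
	unsigned long prev;

	prev = __sync_val_compare_and_swap(&counter, 5UL, 7UL); /* cmpxchg-style */
	printf("cmpxchg returned %lu, counter is now %lu\n", prev, counter);
	prev = my_xchg(&counter, 9UL);				 /* xchg-style */
	printf("xchg returned %lu, counter is now %lu\n", prev, counter);
	return 0;
}

The new xchg() macro in the header follows the same retry loop: read the current value, then attempt the swap until the compare-and-swap reports success.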
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f8c196984853..b91e960e4045 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
#include <asm/div64.h>
+#define CPUTIME_PER_USEC 4096ULL
+#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
@@ -38,24 +40,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
*/
static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
{
- return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+ return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
}
static inline cputime_t jiffies_to_cputime(const unsigned int jif)
{
- return (__force cputime_t)(jif * (4096000000ULL / HZ));
+ return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
}
static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
{
unsigned long long jif = (__force unsigned long long) cputime;
- do_div(jif, 4096000000ULL / HZ);
+ do_div(jif, CPUTIME_PER_SEC / HZ);
return jif;
}
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
- return (__force cputime64_t)(jif * (4096000000ULL / HZ));
+ return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
}
/*
@@ -68,7 +70,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
static inline cputime_t usecs_to_cputime(const unsigned int m)
{
- return (__force cputime_t)(m * 4096ULL);
+ return (__force cputime_t)(m * CPUTIME_PER_USEC);
}
#define usecs_to_cputime64(m) usecs_to_cputime(m)
@@ -78,12 +80,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
*/
static inline unsigned int cputime_to_secs(const cputime_t cputime)
{
- return __div((__force unsigned long long) cputime, 2048000000) >> 1;
+ return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
}
static inline cputime_t secs_to_cputime(const unsigned int s)
{
- return (__force cputime_t)(s * 4096000000ULL);
+ return (__force cputime_t)(s * CPUTIME_PER_SEC);
}
/*
@@ -91,8 +93,8 @@ static inline cputime_t secs_to_cputime(const unsigned int s)
*/
static inline cputime_t timespec_to_cputime(const struct timespec *value)
{
- unsigned long long ret = value->tv_sec * 4096000000ULL;
- return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
+ unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+ return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
}
static inline void cputime_to_timespec(const cputime_t cputime,
@@ -103,12 +105,12 @@ static inline void cputime_to_timespec(const cputime_t cputime,
register_pair rp;
rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
- value->tv_nsec = rp.subreg.even * 1000 / 4096;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
+ value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
- value->tv_sec = __cputime / 4096000000ULL;
+ value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
+ value->tv_sec = __cputime / CPUTIME_PER_SEC;
#endif
}
@@ -119,8 +121,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
*/
static inline cputime_t timeval_to_cputime(const struct timeval *value)
{
- unsigned long long ret = value->tv_sec * 4096000000ULL;
- return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
+ unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
+ return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
}
static inline void cputime_to_timeval(const cputime_t cputime,
@@ -131,12 +133,12 @@ static inline void cputime_to_timeval(const cputime_t cputime,
register_pair rp;
rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
- value->tv_usec = rp.subreg.even / 4096;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
+ value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_usec = (__cputime % 4096000000ULL) / 4096;
- value->tv_sec = __cputime / 4096000000ULL;
+ value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
+ value->tv_sec = __cputime / CPUTIME_PER_SEC;
#endif
}
@@ -146,13 +148,13 @@ static inline void cputime_to_timeval(const cputime_t cputime,
static inline clock_t cputime_to_clock_t(cputime_t cputime)
{
unsigned long long clock = (__force unsigned long long) cputime;
- do_div(clock, 4096000000ULL / USER_HZ);
+ do_div(clock, CPUTIME_PER_SEC / USER_HZ);
return clock;
}
static inline cputime_t clock_t_to_cputime(unsigned long x)
{
- return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
+ return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
}
/*
@@ -161,7 +163,7 @@ static inline cputime_t clock_t_to_cputime(unsigned long x)
static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
{
unsigned long long clock = (__force unsigned long long) cputime;
- do_div(clock, 4096000000ULL / USER_HZ);
+ do_div(clock, CPUTIME_PER_SEC / USER_HZ);
return clock;
}
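The new CPUTIME_PER_USEC and CPUTIME_PER_SEC defines simply name the existing unit: the CPU timer counts in 2**-12 microsecond steps, so 4096 ticks per microsecond. A small stand-alone sketch of the conversion arithmetic used throughout the header follows; USEC_PER_SEC is redefined locally only to keep the example self-contained.

/* Illustrative arithmetic only: one cputime tick is 2**-12 microseconds,
 * i.e. 4096 ticks per microsecond and 4096 * 1000000 ticks per second. */
#include <stdio.h>

#define CPUTIME_PER_USEC 4096ULL
#define USEC_PER_SEC     1000000ULL
#define CPUTIME_PER_SEC  (CPUTIME_PER_USEC * USEC_PER_SEC)

int main(void)
{
	unsigned long long cputime = 3 * CPUTIME_PER_SEC + 250 * CPUTIME_PER_USEC;

	printf("seconds: %llu\n", cputime / CPUTIME_PER_SEC);		/* 3 */
	printf("residual usecs: %llu\n",
	       (cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC);	/* 250 */
	return 0;
}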
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15eb01e9..0206c8052328 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
-debug_sprintf_event(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
__attribute__ ((format(printf, 3, 4)));
+#define debug_sprintf_event(_id, _level, _fmt, ...) \
+({ \
+ debug_entry_t *__ret; \
+ debug_info_t *__id = _id; \
+ int __level = _level; \
+ if ((!__id) || (__level > __id->level)) \
+ __ret = NULL; \
+ else \
+ __ret = __debug_sprintf_event(__id, __level, \
+ _fmt, ## __VA_ARGS__); \
+ __ret; \
+})
static inline debug_entry_t*
debug_exception(debug_info_t* id, int level, void* data, int length)
@@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
* stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
*/
extern debug_entry_t *
-debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
__attribute__ ((format(printf, 3, 4)));
+#define debug_sprintf_exception(_id, _level, _fmt, ...) \
+({ \
+ debug_entry_t *__ret; \
+ debug_info_t *__id = _id; \
+ int __level = _level; \
+ if ((!__id) || (__level > __id->level)) \
+ __ret = NULL; \
+ else \
+ __ret = __debug_sprintf_exception(__id, __level, \
+ _fmt, ## __VA_ARGS__);\
+ __ret; \
+})
+
int debug_register_view(debug_info_t* id, struct debug_view* view);
int debug_unregister_view(debug_info_t* id, struct debug_view* view);
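The debug_sprintf_event() and debug_sprintf_exception() wrappers added above check the debug level before calling the underlying __debug_sprintf_* function, so potentially expensive format arguments are only evaluated when the event would actually be stored. Here is a user-space sketch of the same early-out macro pattern; my_log and my_dev are hypothetical names chosen for the example.

/* Sketch of the early-out wrapper pattern: the arguments expand inside
 * the guarded branch, so they are not evaluated when the level check
 * fails - which is the point of wrapping __debug_sprintf_event(). */
#include <stdio.h>

struct my_dev { int level; };

#define my_log(dev, lvl, fmt, ...)					\
({									\
	struct my_dev *__dev = (dev);					\
	int __lvl = (lvl);						\
	int __ret = 0;							\
	if (__dev && __lvl <= __dev->level)				\
		__ret = printf(fmt, ## __VA_ARGS__);			\
	__ret;								\
})

static const char *expensive_lookup(void)
{
	puts("expensive_lookup() ran");
	return "details";
}

int main(void)
{
	struct my_dev dev = { .level = 1 };

	my_log(&dev, 1, "logged: %s\n", expensive_lookup());	/* runs */
	my_log(&dev, 5, "dropped: %s\n", expensive_lookup());	/* skipped */
	return 0;
}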
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3aef8afec336..abb618f1ead2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,25 +1,69 @@
#ifndef _ASM_S390_FTRACE_H
#define _ASM_S390_FTRACE_H
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_INSN_SIZE 24
+#define MCOUNT_RETURN_FIXUP 18
+
#ifndef __ASSEMBLY__
-extern void _mcount(void);
+#define ftrace_return_address(n) __builtin_return_address(n)
+
+void _mcount(void);
+void ftrace_caller(void);
+
extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
struct dyn_arch_ftrace { };
-#define MCOUNT_ADDR ((long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#define KPROBE_ON_FTRACE_NOP 0
+#define KPROBE_ON_FTRACE_CALL 1
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
-#endif /* __ASSEMBLY__ */
+struct ftrace_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ /* jg .+24 */
+ insn->opc = 0xc0f4;
+ insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+}
-#define MCOUNT_INSN_SIZE 18
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ if (insn->disp == MCOUNT_INSN_SIZE / 2)
+ return 1;
+#endif
+ return 0;
+}
-#define ARCH_SUPPORTS_FTRACE_OPS 1
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+ unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ unsigned long target;
+
+ /* brasl r0,ftrace_caller */
+ target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+ insn->opc = 0xc005;
+ insn->disp = (target - ip) / 2;
+#endif
+}
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_FTRACE_H */
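struct ftrace_insn above describes a 6-byte instruction: a 2-byte opcode followed by a signed 32-bit displacement counted in halfwords. That is why the call generator stores (target - ip) / 2 and the nop variant stores MCOUNT_INSN_SIZE / 2. The stand-alone sketch below illustrates that encoding with hypothetical ip and target addresses; it is not kernel code.

/* Sketch of the relative-branch encoding used above: brasl/jg carry a
 * signed 32-bit displacement in halfwords, so the byte offset to the
 * target is disp * 2 relative to the instruction address. */
#include <stdio.h>
#include <stdint.h>

struct ftrace_insn {
	uint16_t opc;
	int32_t disp;
} __attribute__((packed));

int main(void)
{
	unsigned long ip = 0x1000;	/* hypothetical mcount call site */
	unsigned long target = 0x3000;	/* hypothetical ftrace_caller */
	struct ftrace_insn insn = {
		.opc  = 0xc005,				/* brasl r0,... */
		.disp = (int32_t)((target - ip) / 2),	/* halfword count */
	};

	printf("insn is %zu bytes, branches to %#lx\n",
	       sizeof(insn), ip + (long)insn.disp * 2);
	return 0;
}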
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 6af037f574b8..113cd963dbbe 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -9,9 +9,10 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/seqlock.h>
struct s390_idle_data {
- unsigned int sequence;
+ seqcount_t seqcount;
unsigned long long idle_count;
unsigned long long idle_time;
unsigned long long clock_idle_enter;
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 6ad9013c67e7..30fd5c84680e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -39,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr)
{
}
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return NULL;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
/*
* s390 needs a private implementation of pci_iomap since ioremap with its
* offset parameter isn't sufficient. That's because BAR spaces are not
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index b0d5f0a97a01..343ea7c987aa 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,11 +1,11 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
-#define EXT_INTERRUPT 1
-#define IO_INTERRUPT 2
-#define THIN_INTERRUPT 3
+#define EXT_INTERRUPT 0
+#define IO_INTERRUPT 1
+#define THIN_INTERRUPT 2
-#define NR_IRQS_BASE 4
+#define NR_IRQS_BASE 3
#ifdef CONFIG_PCI_NR_MSI
# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
@@ -13,9 +13,6 @@
# define NR_IRQS NR_IRQS_BASE
#endif
-/* This number is used when no interrupt has been assigned */
-#define NO_IRQ 0
-
/* External interruption codes */
#define EXT_IRQ_INTERRUPT_KEY 0x0040
#define EXT_IRQ_CLK_COMP 0x1004
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 98629173ce3b..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
struct arch_specific_insn {
/* copy of original instruction */
kprobe_opcode_t *insn;
+ unsigned int is_ftrace_insn : 1;
};
struct prev_kprobe {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6cc51fe84410..34fbcac61133 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -147,7 +147,7 @@ struct _lowcore {
__u32 softirq_pending; /* 0x02ec */
__u32 percpu_offset; /* 0x02f0 */
__u32 machine_flags; /* 0x02f4 */
- __u32 ftrace_func; /* 0x02f8 */
+ __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */
__u32 spinlock_lockval; /* 0x02fc */
__u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
@@ -297,7 +297,7 @@ struct _lowcore {
__u64 percpu_offset; /* 0x0378 */
__u64 vdso_per_cpu_data; /* 0x0380 */
__u64 machine_flags; /* 0x0388 */
- __u64 ftrace_func; /* 0x0390 */
+ __u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
__u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c030900320e0..ef803c202d42 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -50,10 +50,6 @@ struct zpci_fmb {
atomic64_t unmapped_pages;
} __packed __aligned(16);
-#define ZPCI_MSI_VEC_BITS 11
-#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
-#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)
-
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
ZPCI_FN_STATE_STANDBY,
@@ -90,6 +86,7 @@ struct zpci_dev {
/* IRQ stuff */
u64 msi_addr; /* MSI address */
+ unsigned int max_msi; /* maximum number of MSI's */
struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned int aisb; /* number of the summary bit */
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index d194d544d694..f664e96f48c7 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -139,7 +139,8 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0;
while (n > 0) {
- size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+ size = zpci_get_max_write_size((u64 __force) src,
+ (u64) dst, n, 8);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
rc = zpci_read_single(req, dst, offset, size);
if (rc)
@@ -162,7 +163,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL;
while (n > 0) {
- size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+ size = zpci_get_max_write_size((u64 __force) dst,
+ (u64) src, n, 128);
req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
if (size > 8) /* main path */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d39a31c3cdf2..e510b9460efa 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
- bool init_skey);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57c882761dea..5e102422c9ab 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -133,6 +133,18 @@ extern unsigned long MODULES_END;
#define MODULES_LEN (1UL << 31)
#endif
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+ BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+ if (addr < (void *)MODULES_VADDR)
+ return 0;
+ if (addr > (void *)MODULES_END)
+ return 0;
+#endif
+ return 1;
+}
+
/*
* A 31 bit pagetable entry of S390 has following format:
* | PFRA | | OS |
@@ -479,6 +491,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
return 0;
}
+/*
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -1634,6 +1651,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
return pmd;
}
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp, int full)
+{
+ pmd_t pmd = *pmdp;
+
+ if (!full)
+ pmdp_flush_lazy(mm, address, pmdp);
+ pmd_clear(pmdp);
+ return pmd;
+}
+
#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
@@ -1746,7 +1776,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
-extern void s390_enable_skey(void);
+extern int s390_enable_skey(void);
+extern void s390_reset_cmma(struct mm_struct *mm);
/*
* No page table caches to initialise
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d559bdb03d18..bed05ea7ec27 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -217,8 +217,6 @@ static inline unsigned short stap(void)
*/
static inline void cpu_relax(void)
{
- if (MACHINE_HAS_DIAG44)
- asm volatile("diag 0,0,68");
barrier();
}
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d6bdf906caa5..0e37cd041241 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
- unsigned int old_expected = old;
-
- asm volatile(
- " cs %0,%3,%1"
- : "=d" (old), "=Q" (*lock)
- : "0" (old), "d" (new), "Q" (*lock)
- : "cc", "memory" );
- return old == old_expected;
+ return __sync_bool_compare_and_swap(lock, old, new);
}
/*
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 572c59949004..06d8741ad6f4 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -121,6 +121,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
#endif
}
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4197c89c52d4..2b446cf0cc65 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -287,7 +287,9 @@
#define __NR_getrandom 349
#define __NR_memfd_create 350
#define __NR_bpf 351
-#define NR_syscalls 352
+#define __NR_s390_pci_mmio_write 352
+#define __NR_s390_pci_mmio_read 353
+#define NR_syscalls 354
/*
* There are some system calls that are not present on 64 bit, some
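The two new syscall numbers above (352 and 353) expose PCI MMIO accesses to user space. Below is a hedged sketch of invoking them directly through syscall(2); the argument layout (mmio_addr, user buffer, length) follows the s390_pci_mmio_read/write man pages and should be treated as an assumption here, and the zero mmio_addr is a placeholder that will simply make the call fail.

/* Hedged sketch: calling the new syscall numbers directly. On a real
 * system mmio_addr would come from mmap() of a PCI BAR resource. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_s390_pci_mmio_write
#define __NR_s390_pci_mmio_write 352
#endif
#ifndef __NR_s390_pci_mmio_read
#define __NR_s390_pci_mmio_read 353
#endif

int main(void)
{
	unsigned long mmio_addr = 0;	/* placeholder, not a mapped BAR */
	unsigned char buf[4] = { 0 };
	long rc;

	rc = syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, sizeof(buf));
	if (rc < 0)
		perror("s390_pci_mmio_read");	/* expected to fail here */
	return 0;
}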