author    Linus Torvalds <torvalds@linux-foundation.org>  2022-10-03 17:24:22 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-10-03 17:24:22 -0700
commit    d0989d01c66fed6a741820a96b8cca6688f183ff (patch)
tree      8454b0329481fec3c2ff8fa6663fd544d8bcd919 /arch
parent    865dad2022c52ac6c5c9a87c5cec78a69f633fb6 (diff)
parent    2120635108b35ecad9c59c8b44f6cbdf4f98214e (diff)
download  linux-d0989d01c66fed6a741820a96b8cca6688f183ff.tar.bz2
Merge tag 'hardening-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull kernel hardening updates from Kees Cook:
 "Most of the collected changes here are fixes across the tree for
  various hardening features (details noted below).

  The most notable new feature here is the addition of the memcpy()
  overflow warning (under CONFIG_FORTIFY_SOURCE), which is the next step
  on the path to killing the common class of "trivially detectable"
  buffer overflow conditions (i.e. on arrays with sizes known at
  compile time) that have resulted in many exploitable vulnerabilities
  over the years (e.g. BleedingTooth).

  This feature is expected to still have some undiscovered false
  positives. It's been in -next for a full development cycle and all
  the reported false positives have been fixed in their respective
  trees. All the known-bad code patterns we could find with Coccinelle
  are also either fixed in their respective trees or in flight.

  The commit message in commit 54d9469bc515 ("fortify: Add run-time
  WARN for cross-field memcpy()") for the feature has extensive
  details, but I'll repeat here that this is a warning _only_, and is
  not intended to actually block overflows (yet). The many patches
  fixing array sizes and struct members have been landing for several
  years now, and we're finally able to turn this on to find any
  remaining stragglers.

  Summary:

  Various fixes across several hardening areas:

   - loadpin: Fix verity target enforcement (Matthias Kaehlcke)

   - zero-call-used-regs: Add missing clobbers in paravirt (Bill
     Wendling)

   - CFI: clean up sparc function pointer type mismatches (Bart Van
     Assche)

   - Clang: Adjust compiler flag detection for various Clang changes
     (Sami Tolvanen, Kees Cook)

   - fortify: Fix warnings in arch-specific code in sh, ARM, and xen

  Improvements to existing features:

   - testing: improve overflow KUnit test, introduce fortify KUnit
     test, add more coverage to LKDTM tests (Bart Van Assche, Kees
     Cook)

   - overflow: Relax overflow type checking for wider utility

  New features:

   - string: Introduce strtomem() and strtomem_pad() to fill a gap in
     strncpy() replacement needs

   - um: Enable FORTIFY_SOURCE support

   - fortify: Enable run-time struct member memcpy() overflow warning"

* tag 'hardening-v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (27 commits)
  Makefile.extrawarn: Move -Wcast-function-type-strict to W=1
  hardening: Remove Clang's enable flag for -ftrivial-auto-var-init=zero
  sparc: Unbreak the build
  x86/paravirt: add extra clobbers with ZERO_CALL_USED_REGS enabled
  x86/paravirt: clean up typos and grammaros
  fortify: Convert to struct vs member helpers
  fortify: Explicitly check bounds are compile-time constants
  x86/entry: Work around Clang __bdos() bug
  ARM: decompressor: Include .data.rel.ro.local
  fortify: Adjust KUnit test for modular build
  sh: machvec: Use char[] for section boundaries
  kunit/memcpy: Avoid pathological compile-time string size
  lib: Improve the is_signed_type() kunit test
  LoadPin: Require file with verity root digests to have a header
  dm: verity-loadpin: Only trust verity targets with enforcement
  LoadPin: Fix Kconfig doc about format of file with verity digests
  um: Enable FORTIFY_SOURCE
  lkdtm: Update tests for memcpy() run-time warnings
  fortify: Add run-time WARN for cross-field memcpy()
  fortify: Use SIZE_MAX instead of (size_t)-1
  ...
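To make the new warning concrete, here is a minimal sketch of the
cross-field pattern it targets; the struct and function names are
hypothetical, not taken from this merge:

#include <linux/string.h>
#include <linux/types.h>

struct packet {
	u8 header[4];
	u8 body[60];	/* adjacent member that an overrun would clobber */
};

static void fill_header(struct packet *p, const u8 *src, size_t len)
{
	/*
	 * If len exceeds sizeof(p->header), the copy spills into body.
	 * With the new CONFIG_FORTIFY_SOURCE check, the fortified
	 * memcpy() sees a 4-byte struct-member destination and emits a
	 * run-time WARN; as the message above stresses, it does not
	 * (yet) block the overflow.
	 */
	memcpy(p->header, src, len);
}

For the common "copy a string into a fixed-size, possibly unterminated
buffer" case that produced many such patterns, the strtomem() and
strtomem_pad() helpers introduced in this series are the intended
strncpy() replacements.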
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S  |  2
-rw-r--r--  arch/sh/include/asm/sections.h          |  2
-rw-r--r--  arch/sh/kernel/machvec.c                | 10
-rw-r--r--  arch/sparc/include/asm/smp_32.h         | 15
-rw-r--r--  arch/sparc/kernel/leon_smp.c            | 12
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c           | 12
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c           | 10
-rw-r--r--  arch/sparc/mm/srmmu.c                   | 29
-rw-r--r--  arch/um/Kconfig                         |  1
-rw-r--r--  arch/um/os-Linux/user_syms.c            |  1
-rw-r--r--  arch/x86/include/asm/paravirt_types.h   | 27
-rw-r--r--  arch/x86/xen/enlighten_pv.c             |  3
12 files changed, 69 insertions, 55 deletions
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 1bcb68ac4b01..3fcb3e62dc56 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -23,6 +23,7 @@ SECTIONS
*(.ARM.extab*)
*(.note.*)
*(.rel.*)
+ *(.printk_index)
/*
* Discard any r/w data - this produces a link error if we have any,
* which is required for PIC decompression. Local data generates
@@ -57,6 +58,7 @@ SECTIONS
*(.rodata)
*(.rodata.*)
*(.data.rel.ro)
+ *(.data.rel.ro.*)
}
.piggydata : {
*(.piggydata)
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 8edb824049b9..0cb0ca149ac3 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -4,7 +4,7 @@
#include <asm-generic/sections.h>
-extern long __machvec_start, __machvec_end;
+extern char __machvec_start[], __machvec_end[];
extern char __uncached_start, __uncached_end;
extern char __start_eh_frame[], __stop_eh_frame[];
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index d606679a211e..57efaf5b82ae 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -20,8 +20,8 @@
#define MV_NAME_SIZE 32
#define for_each_mv(mv) \
- for ((mv) = (struct sh_machine_vector *)&__machvec_start; \
- (mv) && (unsigned long)(mv) < (unsigned long)&__machvec_end; \
+ for ((mv) = (struct sh_machine_vector *)__machvec_start; \
+ (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \
(mv)++)
static struct sh_machine_vector * __init get_mv_byname(const char *name)
@@ -87,8 +87,8 @@ void __init sh_mv_setup(void)
if (!machvec_selected) {
unsigned long machvec_size;
- machvec_size = ((unsigned long)&__machvec_end -
- (unsigned long)&__machvec_start);
+ machvec_size = ((unsigned long)__machvec_end -
+ (unsigned long)__machvec_start);
/*
* Sanity check for machvec section alignment. Ensure
@@ -102,7 +102,7 @@ void __init sh_mv_setup(void)
* vector (usually the only one) from .machvec.init.
*/
if (machvec_size >= sizeof(struct sh_machine_vector))
- sh_mv = *(struct sh_machine_vector *)&__machvec_start;
+ sh_mv = *(struct sh_machine_vector *)__machvec_start;
}
pr_notice("Booting machvec: %s\n", get_system_type());
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 856081761b0f..2cf7971d7f6c 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -33,9 +33,6 @@ extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;
-typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long);
-
void cpu_panic(void);
/*
@@ -57,7 +54,7 @@ void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);
struct sparc32_ipi_ops {
- void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+ void (*cross_call)(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4);
void (*resched)(int cpu);
@@ -66,28 +63,28 @@ struct sparc32_ipi_ops {
};
extern const struct sparc32_ipi_ops *sparc32_ipi_ops;
-static inline void xc0(smpfunc_t func)
+static inline void xc0(void *func)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
}
-static inline void xc1(smpfunc_t func, unsigned long arg1)
+static inline void xc1(void *func, unsigned long arg1)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
}
-static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
+static inline void xc2(void *func, unsigned long arg1, unsigned long arg2)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
}
-static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc3(void *func, unsigned long arg1, unsigned long arg2,
unsigned long arg3)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
arg1, arg2, arg3, 0);
}
-static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc4(void *func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4)
{
sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 1eed26d423fb..991e9ad3d3e8 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -359,7 +359,7 @@ void leonsmp_ipi_interrupt(void)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -372,7 +372,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void leon_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -384,7 +384,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
{
/* If you make changes here, make sure gcc generates proper code... */
- register smpfunc_t f asm("i0") = func;
+ register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
@@ -444,11 +444,13 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void leon_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index ff30f03beb7c..9a62a5cf3337 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -268,7 +268,7 @@ static void sun4d_ipi_resched(int cpu)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -281,7 +281,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -296,7 +296,7 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
* If you make changes here, make sure
* gcc generates proper code...
*/
- register smpfunc_t f asm("i0") = func;
+ register void *f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
register unsigned long a3 asm("i3") = arg3;
@@ -353,11 +353,13 @@ static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = hard_smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 228a6527082d..056df034e79e 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -157,7 +157,7 @@ static void sun4m_ipi_mask_one(int cpu)
}
static struct smp_funcall {
- smpfunc_t func;
+ void *func;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
@@ -170,7 +170,7 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
-static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
@@ -230,11 +230,13 @@ static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
+ void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
+ unsigned long) = ccall_info.func;
int i = smp_processor_id();
ccall_info.processors_in[i] = 1;
- ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
- ccall_info.arg4, ccall_info.arg5);
+ func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
+ ccall_info.arg5);
ccall_info.processors_out[i] = 1;
}
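All three sparc32 SMP backends above now store the cross-call target as
a bare void * and cast it back to the one five-argument signature at
the single place it is invoked, keeping the indirect call's type
consistent with what is actually called (the sparc function-pointer
cleanup noted in the summary). A condensed model, with illustrative
names:

typedef void (*xcall_fn)(unsigned long, unsigned long, unsigned long,
			 unsigned long, unsigned long);

struct funcall_model {
	void *func;	/* callers register functions of varying prototypes */
	unsigned long arg1, arg2, arg3, arg4, arg5;
};

static void run_cross_call(struct funcall_model *c)
{
	xcall_fn fn = c->func;	/* one conversion, at the only call site */

	fn(c->arg1, c->arg2, c->arg3, c->arg4, c->arg5);
}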
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index a9aa6a92c7fe..13f027afc875 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1636,19 +1636,19 @@ static void __init get_srmmu_type(void)
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
- xc1((smpfunc_t) local_ops->page_for_dma, page);
+ xc1(local_ops->page_for_dma, page);
local_ops->page_for_dma(page);
}
static void smp_flush_cache_all(void)
{
- xc0((smpfunc_t) local_ops->cache_all);
+ xc0(local_ops->cache_all);
local_ops->cache_all();
}
static void smp_flush_tlb_all(void)
{
- xc0((smpfunc_t) local_ops->tlb_all);
+ xc0(local_ops->tlb_all);
local_ops->tlb_all();
}
@@ -1659,7 +1659,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+ xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm);
}
}
@@ -1671,7 +1671,7 @@ static void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
- xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+ xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
@@ -1691,8 +1691,8 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->cache_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->cache_range, (unsigned long)vma, start,
+ end);
local_ops->cache_range(vma, start, end);
}
}
@@ -1708,8 +1708,8 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->tlb_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->tlb_range, (unsigned long)vma, start,
+ end);
local_ops->tlb_range(vma, start, end);
}
}
@@ -1723,8 +1723,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->cache_page,
- (unsigned long) vma, page);
+ xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page);
}
}
@@ -1738,8 +1737,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->tlb_page,
- (unsigned long) vma, page);
+ xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page);
}
}
@@ -1753,7 +1751,7 @@ static void smp_flush_page_to_ram(unsigned long page)
* XXX This experiment failed, research further... -DaveM
*/
#if 1
- xc1((smpfunc_t) local_ops->page_to_ram, page);
+ xc1(local_ops->page_to_ram, page);
#endif
local_ops->page_to_ram(page);
}
@@ -1764,8 +1762,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->sig_insns,
- (unsigned long) mm, insn_addr);
+ xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr);
}
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 78de31ac1da7..ad4ff3b0e91e 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -6,6 +6,7 @@ config UML
bool
default y
select ARCH_EPHEMERAL_INODES
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index cb667c9225ab..fd575ecbcaec 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/module.h>
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 89df6c6617f5..b1ab5d94881b 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -328,7 +328,7 @@ int paravirt_disable_iospace(void);
* Unfortunately, this is a relatively slow operation for modern CPUs,
* because it cannot necessarily determine what the destination
* address is. In this case, the address is a runtime constant, so at
- * the very least we can patch the call to e a simple direct call, or
+ * the very least we can patch the call to a simple direct call, or,
* ideally, patch an inline implementation into the callsite. (Direct
* calls are essentially free, because the call and return addresses
* are completely predictable.)
@@ -339,10 +339,10 @@ int paravirt_disable_iospace(void);
* on the stack. All caller-save registers (eax,edx,ecx) are expected
* to be modified (either clobbered or used for return values).
* X86_64, on the other hand, already specifies a register-based calling
- * conventions, returning at %rax, with parameters going on %rdi, %rsi,
+ * conventions, returning at %rax, with parameters going in %rdi, %rsi,
* %rdx, and %rcx. Note that for this reason, x86_64 does not need any
* special handling for dealing with 4 arguments, unlike i386.
- * However, x86_64 also have to clobber all caller saved registers, which
+ * However, x86_64 also has to clobber all caller saved registers, which
* unfortunately, are quite a bit (r8 - r11)
*
* The call instruction itself is marked by placing its start address
@@ -360,22 +360,22 @@ int paravirt_disable_iospace(void);
* There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
* It could be extended to more arguments, but there would be little
* to be gained from that. For each number of arguments, there are
- * the two VCALL and CALL variants for void and non-void functions.
+ * two VCALL and CALL variants for void and non-void functions.
*
* When there is a return value, the invoker of the macro must specify
* the return type. The macro then uses sizeof() on that type to
- * determine whether its a 32 or 64 bit value, and places the return
+ * determine whether it's a 32 or 64 bit value and places the return
* in the right register(s) (just %eax for 32-bit, and %edx:%eax for
- * 64-bit). For x86_64 machines, it just returns at %rax regardless of
+ * 64-bit). For x86_64 machines, it just returns in %rax regardless of
* the return value size.
*
- * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
+ * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
* i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments
* in low,high order
*
* Small structures are passed and returned in registers. The macro
* calling convention can't directly deal with this, so the wrapper
- * functions must do this.
+ * functions must do it.
*
* These PVOP_* macros are only defined within this header. This
* means that all uses must be wrapped in inline functions. This also
@@ -414,8 +414,17 @@ int paravirt_disable_iospace(void);
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
-/* void functions are still allowed [re]ax for scratch */
+/*
+ * void functions are still allowed [re]ax for scratch.
+ *
+ * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
+ * registers. Make sure we model this with the appropriate clobbers.
+ */
+#ifdef CONFIG_ZERO_CALL_USED_REGS
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), PVOP_VCALL_CLOBBERS
+#else
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
+#endif
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
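For intuition about the clobber change: when a callee is built with
-fzero-call-used-regs (the flag behind CONFIG_ZERO_CALL_USED_REGS), it
zeroes the call-used registers it touched before returning, so inline
asm wrapping such a call may no longer assume those registers survive.
A minimal model with hypothetical names; the real macros expand into
the paravirt patching machinery rather than a plain call:

static inline unsigned long model_pv_call(void *thunk)
{
	unsigned long eax, ecx, edx;

	/*
	 * Listing %ecx/%edx as outputs tells the compiler they are dead
	 * after the call, matching a callee that zeroes them on return.
	 */
	asm volatile("call *%[f]"
		     : "=a" (eax), "=c" (ecx), "=d" (edx)
		     : [f] "r" (thunk)
		     : "memory");
	(void)ecx;
	(void)edx;
	return eax;
}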
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 0ed2e487a693..9b1a58dda935 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -765,6 +765,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
+ static const struct trap_info zero = { };
unsigned out;
trace_xen_cpu_load_idt(desc);
@@ -774,7 +775,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
out = xen_convert_trap_info(desc, traps, false);
- memset(&traps[out], 0, sizeof(traps[0]));
+ traps[out] = zero;
xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))
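The memset()-to-assignment swap above zeroes exactly one array element
through plain stores, keeping the fortified memset() (and Clang's
__builtin_dynamic_object_size() reasoning about &traps[out]) out of the
picture entirely. The idiom in isolation, with illustrative names:

struct slot { unsigned long addr; unsigned char flags; };

static struct slot table[257];

static void clear_slot(unsigned int i)
{
	static const struct slot zero = { };

	table[i] = zero;	/* struct assignment, no memset() call */
}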