author		Borislav Petkov <bp@suse.de>	2021-10-04 17:37:22 +0200
committer	Borislav Petkov <bp@suse.de>	2021-10-04 17:37:22 +0200
commit		c7419a6e1aa3219ff62045ab6e56e37d5ad2d292 (patch)
tree		a5145db864629e50196ba0f13e0b1c80f7d4331a /arch/x86/mm
parent		7fae4c24a2b84a66c7be399727aca11e7a888462 (diff)
parent		e9d1d2bb75b2d5d4b426769c5aae0ce8cef3558f (diff)
download	linux-c7419a6e1aa3219ff62045ab6e56e37d5ad2d292.tar.bz2
Merge branch x86/cc into x86/core
Pick up dependent cc_platform_has() changes.
Signed-off-by: Borislav Petkov <bp@suse.de>
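
The conversions picked up here all follow one mapping from the AMD-specific helpers to generic confidential-computing attribute checks. The sketch below summarizes that mapping as it appears at the call sites in the diff; the wrapper function and its name are made up for illustration and are not code from this merge:

	#include <linux/cc_platform.h>

	/*
	 * Mapping used by the conversions in this diff:
	 *
	 *   mem_encrypt_active() -> cc_platform_has(CC_ATTR_MEM_ENCRYPT)
	 *   sme_active()         -> cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)
	 *   sev_active()         -> cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)
	 *   sev_es_active()      -> cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)
	 */
	static bool example_needs_unencrypted_dma(void)
	{
		/* No form of memory encryption is active at all. */
		if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
			return false;

		/* SEV guest: all DMA must target unencrypted addresses. */
		return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
	}

The attribute names deliberately describe the property being asked about (host vs. guest memory encryption, encrypted register state) rather than the vendor feature, which is what lets the call sites below drop the SME/SEV-specific helpers.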
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/fault.c			26
-rw-r--r--	arch/x86/mm/init_64.c			 6
-rw-r--r--	arch/x86/mm/ioremap.c			18
-rw-r--r--	arch/x86/mm/kasan_init_64.c		 6
-rw-r--r--	arch/x86/mm/mem_encrypt.c		55
-rw-r--r--	arch/x86/mm/mem_encrypt_identity.c	 9
-rw-r--r--	arch/x86/mm/numa.c			 2
-rw-r--r--	arch/x86/mm/numa_emulation.c		 3
-rw-r--r--	arch/x86/mm/pat/memtype.c		 7
-rw-r--r--	arch/x86/mm/pat/set_memory.c		 3
10 files changed, 64 insertions(+), 71 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index edb5152f0866..4bfed53e210e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -710,7 +710,8 @@ oops:
 
 static noinline void
 kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
-			 unsigned long address, int signal, int si_code)
+			 unsigned long address, int signal, int si_code,
+			 u32 pkey)
 {
 	WARN_ON_ONCE(user_mode(regs));
 
@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 
 	set_signal_archinfo(address, error_code);
 
-	/* XXX: hwpoison faults will set the wrong code. */
-	force_sig_fault(signal, si_code, (void __user *)address);
+	if (si_code == SEGV_PKUERR) {
+		force_sig_pkuerr((void __user *)address, pkey);
+	} else {
+		/* XXX: hwpoison faults will set the wrong code. */
+		force_sig_fault(signal, si_code, (void __user *)address);
+	}
 }
 
 /*
@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	struct task_struct *tsk = current;
 
 	if (!user_mode(regs)) {
-		kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+		kernelmode_fixup_or_oops(regs, error_code, address,
+					 SIGSEGV, si_code, pkey);
 		return;
 	}
 
@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 {
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs)) {
-		kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+		kernelmode_fixup_or_oops(regs, error_code, address,
+					 SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
 		return;
 	}
 
@@ -1396,7 +1403,8 @@ good_area:
 		 */
 		if (!user_mode(regs))
 			kernelmode_fixup_or_oops(regs, error_code, address,
-						 SIGBUS, BUS_ADRERR);
+						 SIGBUS, BUS_ADRERR,
+						 ARCH_DEFAULT_PKEY);
 		return;
 	}
 
@@ -1416,7 +1424,8 @@ good_area:
 		return;
 
 	if (fatal_signal_pending(current) && !user_mode(regs)) {
-		kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+		kernelmode_fixup_or_oops(regs, error_code, address,
+					 0, 0, ARCH_DEFAULT_PKEY);
		return;
	}
 
@@ -1424,7 +1433,8 @@ good_area:
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs)) {
 		kernelmode_fixup_or_oops(regs, error_code, address,
-					 SIGSEGV, SEGV_MAPERR);
+					 SIGSEGV, SEGV_MAPERR,
+					 ARCH_DEFAULT_PKEY);
 		return;
 	}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a6e11763763f..36098226a957 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
 		return 0;
 
 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
+	if (!p4d_present(*p4d))
 		return 0;
 
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+	if (!pud_present(*pud))
 		return 0;
 
 	if (pud_large(*pud))
 		return pfn_valid(pud_pfn(*pud));
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		return 0;
 
 	if (pmd_large(*pmd))
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 60ade7dd71bd..026031b3b782 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -14,7 +14,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mmiotrace.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <linux/efi.h>
 #include <linux/pgtable.h>
 
@@ -92,7 +92,7 @@ static unsigned int __ioremap_check_ram(struct resource *res)
  */
 static unsigned int __ioremap_check_encrypted(struct resource *res)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return 0;
 
 	switch (res->desc) {
@@ -112,7 +112,7 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
  */
 static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
 {
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	if (!IS_ENABLED(CONFIG_EFI))
@@ -508,6 +508,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 	memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * Examine the physical address to determine if it is an area of memory
  * that should be mapped decrypted. If the memory is not part of the
@@ -555,7 +556,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
 	case E820_TYPE_NVS:
 	case E820_TYPE_UNUSABLE:
 		/* For SEV, these areas are encrypted */
-		if (sev_active())
+		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 			break;
 		fallthrough;
 
@@ -693,7 +694,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
 				 unsigned long flags)
 {
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return true;
 
 	if (flags & MEMREMAP_ENC)
@@ -702,7 +703,7 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
 	if (flags & MEMREMAP_DEC)
 		return false;
 
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		if (memremap_is_setup_data(phys_addr, size) ||
 		    memremap_is_efi_data(phys_addr, size))
 			return false;
@@ -723,12 +724,12 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 {
 	bool encrypted_prot;
 
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return prot;
 
 	encrypted_prot = true;
 
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		if (early_memremap_is_setup_data(phys_addr, size) ||
 		    memremap_is_efi_data(phys_addr, size))
 			encrypted_prot = false;
@@ -746,7 +747,6 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
 	return arch_memremap_can_ram_remap(phys_addr, size, 0);
 }
 
-#ifdef CONFIG_AMD_MEM_ENCRYPT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
 				      unsigned long size)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 1a50434c8a4d..ef885370719a 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 		p = early_alloc(PMD_SIZE, nid, false);
 		if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
 			return;
-		else if (p)
-			memblock_free(__pa(p), PMD_SIZE);
+		memblock_free_ptr(p, PMD_SIZE);
 	}
 
 	p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
 		p = early_alloc(PUD_SIZE, nid, false);
 		if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
 			return;
-		else if (p)
-			memblock_free(__pa(p), PUD_SIZE);
+		memblock_free_ptr(p, PUD_SIZE);
 	}
 
 	p = early_alloc(PAGE_SIZE, nid, true);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ff08dc463634..23d54b810f08 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -143,7 +144,7 @@ void __init sme_unmap_bootdata(char *real_mode_data)
 	struct boot_params *boot_data;
 	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	/* Get the command line address before unmapping the real_mode_data */
@@ -163,7 +164,7 @@ void __init sme_map_bootdata(char *real_mode_data)
 	struct boot_params *boot_data;
 	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@ -193,7 +194,7 @@ void __init sme_early_init(void)
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
@@ -202,7 +203,7 @@ void __init sev_setup_arch(void)
 	phys_addr_t total_mem = memblock_phys_mem_size();
 	unsigned long size;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	/*
@@ -360,42 +361,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-/*
- * SME and SEV are very similar but they are not the same, so there are
- * times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this. When a
- * distinction isn't needed, the mem_encrypt_active() function can be used.
- *
- * The trampoline code is a good example for this requirement. Before
- * paging is activated, SME will access all memory as decrypted, but SEV
- * will access all memory as encrypted. So, when APs are being brought
- * up under SME the trampoline area cannot be encrypted, whereas under SEV
- * the trampoline area must be encrypted.
- */
-bool sev_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ENABLED;
-}
-
-bool sme_active(void)
-{
-	return sme_me_mask && !sev_active();
-}
-EXPORT_SYMBOL_GPL(sev_active);
-
-/* Needs to be called from non-instrumentable code */
-bool noinstr sev_es_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
-}
-
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return true;
 
 	/*
@@ -403,7 +375,7 @@ bool force_dma_unencrypted(struct device *dev)
 	 * device does not support DMA to addresses that include the
 	 * encryption mask.
 	 */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
 		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
 						dev->bus_dma_limit);
@@ -428,7 +400,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
 	 * The unused memory range was mapped decrypted, change the encryption
 	 * attribute from decrypted to encrypted before freeing it.
 	 */
-	if (mem_encrypt_active()) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		r = set_memory_encrypted(vaddr, npages);
 		if (r) {
 			pr_warn("failed to free unused decrypted pages\n");
@@ -444,7 +416,7 @@ static void print_mem_encrypt_feature_info(void)
 	pr_info("AMD Memory Encryption Features active:");
 
 	/* Secure Memory Encryption */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		/*
 		 * SME is mutually exclusive with any of the SEV
 		 * features below.
@@ -454,11 +426,11 @@
 	}
 
 	/* Secure Encrypted Virtualization */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		pr_cont(" SEV");
 
 	/* Encrypted Register State */
-	if (sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		pr_cont(" SEV-ES");
 
 	pr_cont("\n");
@@ -477,7 +449,8 @@ void __init mem_encrypt_init(void)
 	 * With SEV, we need to unroll the rep string I/O instructions,
 	 * but SEV-ES supports them through the #VC handler.
 	 */
-	if (sev_active() && !sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+	    !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		static_branch_enable(&sev_enable_key);
 
 	print_mem_encrypt_feature_info();
@@ -485,6 +458,6 @@
 
 int arch_has_restricted_virtio_memory_access(void)
 {
-	return sev_active();
+	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
 }
 EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 470b20208430..f8c612902038 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -30,6 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -287,7 +288,13 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
 	unsigned long pgtable_area_len;
 	unsigned long decrypted_base;
 
-	if (!sme_active())
+	/*
+	 * This is early code, use an open coded check for SME instead of
+	 * using cc_platform_has(). This eliminates worries about removing
+	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
+	 * function.
+	 */
+	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
 		return;
 
 	/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a1b5c71099e6..1e9b93b088db 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)
 
 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_free(__pa(numa_distance), size);
+		memblock_free_ptr(numa_distance, size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 737491b13728..e801e30089c4 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	}
 
 	/* free the copied physical distance table */
-	if (phys_dist)
-		memblock_free(__pa(phys_dist), phys_size);
+	memblock_free_ptr(phys_dist, phys_size);
 	return;
 
 no_emu:
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 3112ca7786ed..4ba2a3ee4bce 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 	int err = 0;
 
 	start = sanitize_phys(start);
-	end = sanitize_phys(end);
+
+	/*
+	 * The end address passed into this function is exclusive, but
+	 * sanitize_phys() expects an inclusive address.
+	 */
+	end = sanitize_phys(end - 1) + 1;
 	if (start >= end) {
 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
 		     start, end - 1, cattr_name(req_type));
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index ad8a5c586a35..527957586f3c 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -18,6 +18,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
+#include <linux/cc_platform.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -1986,7 +1987,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
-	if (!mem_encrypt_active())
+	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		return 0;
 
 	/* Should not be working on unaligned addresses */
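
One pattern worth noting in the memblock changes above (kasan_init_64.c, numa.c, numa_emulation.c): memblock_free() takes a physical address, so callers had to translate with __pa() and guard against a NULL pointer themselves, while memblock_free_ptr() takes the virtual pointer directly. That the diff drops the caller-side guards and calls it unconditionally suggests memblock_free_ptr() also tolerates NULL; the before/after sketch below is written under that inferred assumption, reusing p and PMD_SIZE from the kasan hunk:

	/* Before: physical address, caller must NULL-check. */
	if (p)
		memblock_free(__pa(p), PMD_SIZE);

	/* After: virtual pointer passed through; the dropped guard
	 * implies memblock_free_ptr() handles a NULL pointer itself. */
	memblock_free_ptr(p, PMD_SIZE);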