author     Linus Torvalds <torvalds@linux-foundation.org>  2017-03-01 10:32:30 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-03-01 10:32:30 -0800
commit     6053dc981449718d90a429933e99b441e1adaea6 (patch)
tree       2f1f9e7e3b0b7dcaad282f9651b29f52f9433fc3
parent     b286cedd473006b33d5ae076afac509e6b2c3bf4 (diff)
parent     638f863dbbc8da16834ee0acc6ac10754f79c486 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "The main fix here addresses a kernel panic triggered on Qualcomm
  QDF2400 due to incorrect register usage in an erratum workaround
  introduced during the merge window.

  Summary:

   - Fix kernel panic on specific Qualcomm platform due to broken
     erratum workaround

   - Revert contiguous bit support due to TLB conflict aborts in
     simulation

   - Don't treat all CPU ID register fields as 4-bit quantities"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/cpufeature: check correct field width when updating sys_val
  Revert "arm64: mm: set the contiguous bit for kernel mappings where appropriate"
  arm64: Avoid clobbering mm in erratum workaround on QDF2400
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  14
-rw-r--r--  arch/arm64/mm/mmu.c                  34
-rw-r--r--  arch/arm64/mm/proc.S                  2
3 files changed, 15 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 4ce82ed3e7c3..05310ad8c5ab 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -184,16 +184,22 @@ static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
}
static inline int __attribute_const__
-cpuid_feature_extract_field(u64 features, int field, bool sign)
+cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
return (sign) ?
- cpuid_feature_extract_signed_field(features, field) :
- cpuid_feature_extract_unsigned_field(features, field);
+ cpuid_feature_extract_signed_field_width(features, field, width) :
+ cpuid_feature_extract_unsigned_field_width(features, field, width);
+}
+
+static inline int __attribute_const__
+cpuid_feature_extract_field(u64 features, int field, bool sign)
+{
+ return cpuid_feature_extract_field_width(features, field, 4, sign);
}
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
- return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign);
+ return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
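
For context on the cpufeature.h change above: the old helper always extracted 4 bits, so wider ID register fields were silently truncated when the system-wide value was computed. The standalone sketch below (a simplified re-implementation of the unsigned width-aware extraction, not the kernel helper itself; the register value and the main() driver are made up for illustration) shows why the width has to come from the caller:

/*
 * Illustrative userspace sketch of width-aware field extraction:
 * shift the field up to the top of the word, then shift it back down
 * using the caller-supplied width instead of a hard-coded 4 bits.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t extract_unsigned_field(uint64_t features, int shift, int width)
{
	/* Logical right shift of an unsigned value zero-extends the field. */
	return (features << (64 - width - shift)) >> (64 - width);
}

int main(void)
{
	uint64_t reg = 0x25ULL << 4;	/* an 8-bit field holding 0x25 at bit 4 */

	/* Assuming the field is 4 bits wide reads only its low nibble ... */
	printf("width 4: %#llx\n",
	       (unsigned long long)extract_unsigned_field(reg, 4, 4));
	/* ... while passing the real width recovers the whole field. */
	printf("width 8: %#llx\n",
	       (unsigned long long)extract_unsigned_field(reg, 4, 8));
	return 0;
}

The kernel additionally has a signed variant that sign-extends instead of zero-extending; the diff above merely threads an explicit width through to those helpers, keeping the old 4-bit behaviour as the default via cpuid_feature_extract_field().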
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b805c017f789..d28dbcf596b6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -109,10 +109,8 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
- bool page_mappings_only)
+ phys_addr_t (*pgtable_alloc)(void))
{
- pgprot_t __prot = prot;
pte_t *pte;
BUG_ON(pmd_sect(*pmd));
@@ -130,18 +128,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
do {
pte_t old_pte = *pte;
- /*
- * Set the contiguous bit for the subsequent group of PTEs if
- * its size and alignment are appropriate.
- */
- if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
- if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
- __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
- else
- __prot = prot;
- }
-
- set_pte(pte, pfn_pte(pfn, __prot));
+ set_pte(pte, pfn_pte(pfn, prot));
pfn++;
/*
@@ -160,7 +147,6 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
phys_addr_t (*pgtable_alloc)(void),
bool page_mappings_only)
{
- pgprot_t __prot = prot;
pmd_t *pmd;
unsigned long next;
@@ -187,18 +173,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
!page_mappings_only) {
- /*
- * Set the contiguous bit for the subsequent group of
- * PMDs if its size and alignment are appropriate.
- */
- if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
- if (end - addr >= CONT_PMD_SIZE)
- __prot = __pgprot(pgprot_val(prot) |
- PTE_CONT);
- else
- __prot = prot;
- }
- pmd_set_huge(pmd, phys, __prot);
+ pmd_set_huge(pmd, phys, prot);
/*
* After the PMD entry has been populated once, we
@@ -208,8 +183,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd_val(*pmd)));
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, pgtable_alloc,
- page_mappings_only);
+ prot, pgtable_alloc);
BUG_ON(pmd_val(old_pmd) != 0 &&
pmd_val(old_pmd) != pmd_val(*pmd));
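
The mmu.c hunks above revert the contiguous-hint logic: PTE_CONT was only applied when both the virtual and the physical address were aligned to the contiguous span and at least a full span remained to be mapped. A minimal userspace sketch of that gating check, with stand-in constants for a 4K-granule layout (CONT_PTE_SIZE, CONT_PTE_MASK and use_cont_hint() here are illustrative, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in values: 16 contiguous 4 KiB PTEs can share one TLB entry. */
#define CONT_PTE_SIZE	(16UL * 4096UL)
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

/*
 * Mirror of the reverted condition: set the hint only when VA and PA are
 * both aligned to the contiguous span and a full span is left to map.
 */
static bool use_cont_hint(uint64_t addr, uint64_t phys, uint64_t end)
{
	return ((addr | phys) & ~CONT_PTE_MASK) == 0 &&
	       end - addr >= CONT_PTE_SIZE;
}

int main(void)
{
	printf("%d\n", use_cont_hint(0x40000000, 0x80000000, 0x40010000)); /* 1 */
	printf("%d\n", use_cont_hint(0x40001000, 0x80000000, 0x40010000)); /* 0: VA misaligned */
	return 0;
}

The revert itself is motivated by the TLB conflict aborts mentioned in the merge summary above, not by this check being wrong.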
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cd4d53d7e458..877d42fb0df6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -138,7 +138,7 @@ ENDPROC(cpu_do_resume)
* - pgd_phys - physical address of new TTB
*/
ENTRY(cpu_do_switch_mm)
- pre_ttbr0_update_workaround x0, x1, x2
+ pre_ttbr0_update_workaround x0, x2, x3
mmid x1, x1 // get mm->context.id
bfi x0, x1, #48, #16 // set the ASID
msr ttbr0_el1, x0 // set TTBR0