Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/context.c      | 38
-rw-r--r--  arch/arm64/mm/dma-mapping.c  | 35
-rw-r--r--  arch/arm64/mm/fault.c        | 28
-rw-r--r--  arch/arm64/mm/mmu.c          | 91
4 files changed, 78 insertions(+), 114 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index f636a2639f03..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
__flush_icache_all();
}
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
int cpu;
- for_each_possible_cpu(cpu)
- if (per_cpu(reserved_asids, cpu) == asid)
- return 1;
- return 0;
+ bool hit = false;
+
+ /*
+ * Iterate over the set of reserved ASIDs looking for a match.
+ * If we find one, then we can update our mm to use newasid
+ * (i.e. the same ASID in the current generation) but we can't
+ * exit the loop early, since we need to ensure that all copies
+ * of the old ASID are updated to reflect the mm. Failure to do
+ * so could result in us missing the reserved ASID in a future
+ * generation.
+ */
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(reserved_asids, cpu) == asid) {
+ hit = true;
+ per_cpu(reserved_asids, cpu) = newasid;
+ }
+ }
+
+ return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 generation = atomic64_read(&asid_generation);
if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK);
+
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
- if (is_reserved_asid(asid))
- return generation | (asid & ~ASID_MASK);
+ if (check_update_reserved_asid(asid, newasid))
+ return newasid;
/*
* We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
*/
asid &= ~ASID_MASK;
if (!__test_and_set_bit(asid, asid_map))
- goto bump_gen;
+ return newasid;
}
/*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
-
-bump_gen:
- asid |= generation;
- return asid;
+ return asid | generation;
}
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
*/
#include <linux/gfp.h>
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
@@ -28,9 +29,6 @@
#include <asm/cacheflush.h>
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
- int ret;
-
- dma_ops = &swiotlb_dma_ops;
-
- ret = atomic_pool_init();
-
- return ret;
+ return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+ size_t iosize = size;
void *addr;
if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
return NULL;
+
+ size = PAGE_ALIGN(size);
+
/*
* Some drivers rely on this, and we probably don't want the
* possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
- pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+ pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
flush_page);
if (!pages)
return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
__builtin_return_address(0));
if (!addr)
- iommu_dma_free(dev, pages, size, handle);
+ iommu_dma_free(dev, pages, iosize, handle);
} else {
struct page *page;
/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
if (!addr)
return NULL;
- *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+ *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
if (iommu_dma_mapping_error(dev, *handle)) {
if (coherent)
__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
{
+ size_t iosize = size;
+
+ size = PAGE_ALIGN(size);
/*
* @cpu_addr will be one of 3 things depending on how it was allocated:
* - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* Hence how dodgy the below logic looks...
*/
if (__in_atomic_pool(cpu_addr, size)) {
- iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
__free_from_pool(cpu_addr, size);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
if (WARN_ON(!area || !area->pages))
return;
- iommu_dma_free(dev, area->pages, size, &handle);
+ iommu_dma_free(dev, area->pages, iosize, &handle);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else {
- iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
__free_pages(virt_to_page(cpu_addr), get_order(size));
}
}
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
- if (!acpi_disabled && !dev->archdata.dma_ops)
- dev->archdata.dma_ops = dma_ops;
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = &swiotlb_dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 19211c4a8911..92ddac1e8ca2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -393,16 +393,16 @@ static struct fault_info {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
- { do_bad, SIGBUS, 0, "reserved access flag fault" },
+ { do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
- { do_bad, SIGBUS, 0, "reserved permission fault" },
+ { do_bad, SIGBUS, 0, "unknown 12" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
- { do_bad, SIGBUS, 0, "asynchronous external abort" },
+ { do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
- { do_bad, SIGBUS, 0, "asynchronous parity error" },
+ { do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
- { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
{ do_bad, SIGBUS, 0, "unknown 32" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
- { do_bad, SIGBUS, 0, "debug event" },
+ { do_bad, SIGBUS, 0, "unknown 34" },
{ do_bad, SIGBUS, 0, "unknown 35" },
{ do_bad, SIGBUS, 0, "unknown 36" },
{ do_bad, SIGBUS, 0, "unknown 37" },
@@ -433,21 +433,21 @@ static struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 45" },
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
- { do_bad, SIGBUS, 0, "unknown 48" },
+ { do_bad, SIGBUS, 0, "TLB conflict abort" },
{ do_bad, SIGBUS, 0, "unknown 49" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
- { do_bad, SIGBUS, 0, "unknown 53" },
+ { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
{ do_bad, SIGBUS, 0, "unknown 54" },
{ do_bad, SIGBUS, 0, "unknown 55" },
{ do_bad, SIGBUS, 0, "unknown 56" },
{ do_bad, SIGBUS, 0, "unknown 57" },
- { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
+ { do_bad, SIGBUS, 0, "unknown 58" },
{ do_bad, SIGBUS, 0, "unknown 59" },
{ do_bad, SIGBUS, 0, "unknown 60" },
- { do_bad, SIGBUS, 0, "unknown 61" },
- { do_bad, SIGBUS, 0, "unknown 62" },
+ { do_bad, SIGBUS, 0, "section domain fault" },
+ { do_bad, SIGBUS, 0, "page domain fault" },
{ do_bad, SIGBUS, 0, "unknown 63" },
};
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..873e363048c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
- void *ptr = __va(memblock_alloc(sz, sz));
- BUG_ON(!ptr);
+ phys_addr_t phys;
+ void *ptr;
+
+ phys = memblock_alloc(sz, sz);
+ BUG_ON(!phys);
+ ptr = __va(phys);
memset(ptr, 0, sz);
return ptr;
}
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
do {
/*
* Need to have the least restrictive permissions available
- * permissions will be fixed up later. Default the new page
- * range as contiguous ptes.
+ * permissions will be fixed up later
*/
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
pfn++;
} while (pte++, i++, i < PTRS_PER_PTE);
}
-/*
- * Given a PTE with the CONT bit set, determine where the CONT range
- * starts, and clear the entire range of PTE CONT bits.
- */
-static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
-{
- int i;
-
- pte -= CONT_RANGE_OFFSET(addr);
- for (i = 0; i < CONT_PTES; i++) {
- set_pte(pte, pte_mknoncont(*pte));
- pte++;
- }
- flush_tlb_all();
-}
-
-/*
- * Given a range of PTEs set the pfn and provided page protection flags
- */
-static void __populate_init_pte(pte_t *pte, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- pgprot_t prot)
-{
- unsigned long pfn = __phys_to_pfn(phys);
-
- do {
- /* clear all the bits except the pfn, then apply the prot */
- set_pte(pte, pfn_pte(pfn, prot));
- pte++;
- pfn++;
- addr += PAGE_SIZE;
- } while (addr != end);
-}
-
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys,
+ unsigned long end, unsigned long pfn,
pgprot_t prot,
void *(*alloc)(unsigned long size))
{
pte_t *pte;
- unsigned long next;
if (pmd_none(*pmd) || pmd_sect(*pmd)) {
pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- next = min(end, (addr + CONT_SIZE) & CONT_MASK);
- if (((addr | next | phys) & ~CONT_MASK) == 0) {
- /* a block of CONT_PTES */
- __populate_init_pte(pte, addr, next, phys,
- __pgprot(pgprot_val(prot) | PTE_CONT));
- } else {
- /*
- * If the range being split is already inside of a
- * contiguous range but this PTE isn't going to be
- * contiguous, then we want to unmark the adjacent
- * ranges, then update the portion of the range we
- * are interrested in.
- */
- clear_cont_pte_range(pte, addr);
- __populate_init_pte(pte, addr, next, phys, prot);
- }
-
- pte += (next - addr) >> PAGE_SHIFT;
- phys += next - addr;
- addr = next;
- } while (addr != end);
+ set_pte(pte, pfn_pte(pfn, prot));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
}
static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
}
}
} else {
- alloc_init_pte(pmd, addr, next, phys, prot, alloc);
+ alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+ prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
@@ -362,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
* for now. This will get more fine grained later once all memory
* is mapped
*/
- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
if (end < kernel_x_start) {
create_mapping(start, __phys_to_virt(start),
@@ -451,18 +402,18 @@ static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
/* now that we are actually fully mapped, make the start/end more fine grained */
- if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+ if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
unsigned long aligned_start = round_down(__pa(_stext),
- SECTION_SIZE);
+ SWAPPER_BLOCK_SIZE);
create_mapping(aligned_start, __phys_to_virt(aligned_start),
__pa(_stext) - aligned_start,
PAGE_KERNEL);
}
- if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+ if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
unsigned long aligned_end = round_up(__pa(__init_end),
- SECTION_SIZE);
+ SWAPPER_BLOCK_SIZE);
create_mapping(__pa(__init_end), (unsigned long)__init_end,
aligned_end - __pa(__init_end),
PAGE_KERNEL);
@@ -475,7 +426,7 @@ void mark_rodata_ro(void)
{
create_mapping_late(__pa(_stext), (unsigned long)_stext,
(unsigned long)_etext - (unsigned long)_stext,
- PAGE_KERNEL_EXEC | PTE_RDONLY);
+ PAGE_KERNEL_ROX);
}
#endif