Diffstat (limited to 'arch/xtensa/include/asm')
-rw-r--r--  arch/xtensa/include/asm/bitops.h          |  2
-rw-r--r--  arch/xtensa/include/asm/cacheasm.h        | 11
-rw-r--r--  arch/xtensa/include/asm/fixmap.h          |  5
-rw-r--r--  arch/xtensa/include/asm/highmem.h         |  5
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h  | 44
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h     | 74
-rw-r--r--  arch/xtensa/include/asm/page.h            | 27
-rw-r--r--  arch/xtensa/include/asm/pgtable.h         |  7
-rw-r--r--  arch/xtensa/include/asm/platform.h        |  6
-rw-r--r--  arch/xtensa/include/asm/processor.h       |  2
-rw-r--r--  arch/xtensa/include/asm/sysmem.h          | 21
-rw-r--r--  arch/xtensa/include/asm/vectors.h         | 67
12 files changed, 167 insertions, 104 deletions
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 3f44fa2a53e9..d3490189792b 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -48,7 +48,7 @@ static inline int ffz(unsigned long x)
* __ffs: Find first bit set in word. Return 0 for bit 0
*/
-static inline int __ffs(unsigned long x)
+static inline unsigned long __ffs(unsigned long x)
{
return 31 - __cntlz(x & -x);
}
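
[note] The return-type change matches the unsigned result callers of __ffs() expect, and the body relies on x & -x isolating the lowest set bit. A minimal user-space sketch of the same computation, assuming GCC's __builtin_clz() as a stand-in for the kernel's __cntlz()/NSAU helper and unsigned int for Xtensa's 32-bit unsigned long:

#include <assert.h>

/* Sketch of the __ffs() logic above: x & -x keeps only the lowest set bit,
 * and 31 minus its leading-zero count is that bit's index (0 for bit 0).
 */
static inline unsigned long sketch_ffs(unsigned int x)
{
	return 31 - __builtin_clz(x & -x);
}

int main(void)
{
	assert(sketch_ffs(0x00000001u) == 0);	/* bit 0 -> index 0 */
	assert(sketch_ffs(0x00000018u) == 3);	/* lowest set bit is bit 3 */
	assert(sketch_ffs(0x80000000u) == 31);
	return 0;
}
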
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index e0f9e1109c83..2041abb10a23 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -69,26 +69,23 @@
.endm
-#if XCHAL_DCACHE_LINE_LOCKABLE
-
.macro ___unlock_dcache_all ar at
-#if XCHAL_DCACHE_SIZE
+#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
#endif
.endm
-#endif
-
-#if XCHAL_ICACHE_LINE_LOCKABLE
.macro ___unlock_icache_all ar at
+#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
-#endif
+
.macro ___flush_invalidate_dcache_all ar at
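
[note] The point of this hunk is to move the configuration check inside the macro body: ___unlock_dcache_all and ___unlock_icache_all are now always defined and simply expand to nothing when the cache is absent or not line-lockable, so call sites need no matching #if guards. A rough C analogy of the same pattern (unlock_dcache_all() is an illustrative name, not a kernel symbol):

/* Always provide the symbol; make only the body conditional, so callers
 * stay free of #ifdefs.  XCHAL_DCACHE_LINE_LOCKABLE and XCHAL_DCACHE_SIZE
 * are the real configuration macros from <variant/core.h>.
 */
static inline void unlock_dcache_all(void)
{
#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
	/* ...walk the whole cache issuing "diu" (unlock) operations... */
#endif
}
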
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 62b507deea9d..0d30403b6c95 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -59,6 +59,11 @@ enum fixed_addresses {
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
+ /* Check if this memory layout is broken because fixmap overlaps page
+ * table.
+ */
+ BUILD_BUG_ON(FIXADDR_START <
+ XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
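
[note] The added BUILD_BUG_ON() turns an overlapping layout into a build failure instead of silent page-table corruption at run time; both operands are compile-time constants, so the check generates no code. A stand-alone sketch of the same idea using C11 _Static_assert (the page-table constants mirror kmem_layout.h below; DEMO_FIXADDR_START is a made-up example value):

/* Fail the build if the fixmap window starts inside the region reserved
 * for the kernel page table.
 */
#define DEMO_PAGE_TABLE_VADDR	0x80000000u
#define DEMO_PAGE_TABLE_SIZE	0x00400000u
#define DEMO_FIXADDR_START	0x80400000u	/* illustrative value */

_Static_assert(DEMO_FIXADDR_START >=
	       DEMO_PAGE_TABLE_VADDR + DEMO_PAGE_TABLE_SIZE,
	       "fixmap overlaps the page table");
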
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 01cef6b40829..6e070db1022e 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -68,6 +68,11 @@ void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE <
+ XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 7a1e075969a3..42410f253597 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -77,13 +77,16 @@
.align 4
1: movi a2, 0x10000000
- movi a3, 0x18000000
- add a2, a2, a0
-9: bgeu a2, a3, 9b /* PC is out of the expected range */
+
+#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
+#define TEMP_MAPPING_VADDR 0x40000000
+#else
+#define TEMP_MAPPING_VADDR 0x00000000
+#endif
/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
- movi a2, 0x40000000 | XCHAL_SPANNING_WAY
+ movi a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
idtlb a2
iitlb a2
isync
@@ -95,14 +98,14 @@
srli a3, a0, 27
slli a3, a3, 27
addi a3, a3, CA_BYPASS
- addi a7, a2, -1
+ addi a7, a2, 5 - XCHAL_SPANNING_WAY
wdtlb a3, a7
witlb a3, a7
isync
slli a4, a0, 5
srli a4, a4, 5
- addi a5, a2, -6
+ addi a5, a2, -XCHAL_SPANNING_WAY
add a4, a4, a5
jx a4
@@ -116,35 +119,48 @@
add a5, a5, a4
bne a5, a2, 3b
- /* Step 4: Setup MMU with the old V2 mappings. */
+ /* Step 4: Setup MMU with the requested static mappings. */
+
movi a6, 0x01000000
wsr a6, ITLBCFG
wsr a6, DTLBCFG
isync
- movi a5, 0xd0000005
- movi a4, CA_WRITEBACK
+ movi a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
- movi a5, 0xd8000005
- movi a4, CA_BYPASS
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + CA_BYPASS
wdtlb a4, a5
witlb a4, a5
- movi a5, XCHAL_KIO_CACHED_VADDR + 6
+#ifdef CONFIG_XTENSA_KSEG_512M
+ movi a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+ movi a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+#endif
+
+ movi a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
wdtlb a4, a5
witlb a4, a5
- movi a5, XCHAL_KIO_BYPASS_VADDR + 6
+ movi a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
wdtlb a4, a5
witlb a4, a5
isync
- /* Jump to self, using MMU v2 mappings. */
+ /* Jump to self, using final mappings. */
movi a4, 1f
jx a4
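
[note] Each wdtlb/witlb pair in step 4 programs one fixed TLB way: the register used as the address operand carries the virtual address with the way index folded into its low bits, and the data operand carries the physical address plus a cache-attribute value. A small sketch of how those operands are composed, using the CONFIG_XTENSA_KSEG_256M numbers from kmem_layout.h below and keeping CA_WRITEBACK symbolic (its value is core dependent):

/* Sketch: operands for one wdtlb/witlb pair, as built in a5/a4 above.
 *   as operand = vaddr | way index
 *   at operand = paddr + cache attribute
 */
struct tlb_way_entry {
	unsigned long as;	/* vaddr | way */
	unsigned long at;	/* paddr + CA_* */
};

static struct tlb_way_entry kseg_cached_entry(unsigned long kseg_paddr,
					      unsigned long ca_writeback)
{
	struct tlb_way_entry e = {
		.as = 0xb0000000ul | 6,		/* XCHAL_KSEG_CACHED_VADDR, way 6 */
		.at = kseg_paddr + ca_writeback,
	};
	return e;
}
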
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
new file mode 100644
index 000000000000..561f8729bcde
--- /dev/null
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -0,0 +1,74 @@
+/*
+ * Kernel virtual memory layout definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_KMEM_LAYOUT_H
+#define _XTENSA_KMEM_LAYOUT_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Fixed TLB translations in the processor.
+ */
+
+#define XCHAL_PAGE_TABLE_VADDR __XTENSA_UL_CONST(0x80000000)
+#define XCHAL_PAGE_TABLE_SIZE __XTENSA_UL_CONST(0x00400000)
+
+#if defined(CONFIG_XTENSA_KSEG_MMU_V2)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x08000000)
+#define XCHAL_KSEG_TLB_WAY 5
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_256M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xb0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#elif defined(CONFIG_XTENSA_KSEG_512M)
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xa0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xc0000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x20000000)
+#define XCHAL_KSEG_ALIGNMENT __XTENSA_UL_CONST(0x10000000)
+#define XCHAL_KSEG_TLB_WAY 6
+#define XCHAL_KIO_TLB_WAY 6
+
+#else
+#error Unsupported KSEG configuration
+#endif
+
+#ifdef CONFIG_KSEG_PADDR
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
+#else
+#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
+#endif
+
+#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
+#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
+#endif
+
+#else
+
+#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
+#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
+#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+
+#endif
+
+#endif
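
[note] For reference, the three KSEG configurations above trade user virtual address space against the size of the directly mapped low-memory window; with the default XCHAL_KSEG_PADDR of 0 they come out as:

  config                    cached KSEG   bypass KSEG   size         TLB way
  XTENSA_KSEG_MMU_V2        0xd0000000    0xd8000000    0x08000000   5
  XTENSA_KSEG_256M          0xb0000000    0xc0000000    0x10000000   6
  XTENSA_KSEG_512M          0xa0000000    0xc0000000    0x20000000   6 (two entries)

The 512M case is the one that needs the extra pair of way-6 mappings set up in initialize_mmu.h above, each covering 256 MiB of the 512 MiB window.
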
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index ad38500471fa..976b1d70edbc 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -15,15 +15,7 @@
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
-
-/*
- * Fixed TLB translations in the processor.
- */
-
-#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
-#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
-#define XCHAL_KSEG_PADDR __XTENSA_UL_CONST(0x00000000)
-#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
+#include <asm/kmem_layout.h>
/*
* PAGE_SHIFT determines the page size
@@ -35,10 +27,13 @@
#ifdef CONFIG_MMU
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN XCHAL_KSEG_SIZE
+#define PHYS_OFFSET XCHAL_KSEG_PADDR
+#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+ PHYS_PFN(XCHAL_KSEG_SIZE))
#else
-#define PAGE_OFFSET __XTENSA_UL_CONST(0)
-#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define PAGE_OFFSET PLATFORM_DEFAULT_MEM_START
+#define PHYS_OFFSET PLATFORM_DEFAULT_MEM_START
+#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
#define PGTABLE_START 0x80000000
@@ -167,10 +162,12 @@ void copy_user_highpage(struct page *to, struct page *from,
* addresses.
*/
-#define ARCH_PFN_OFFSET (PLATFORM_DEFAULT_MEM_START >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
-#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
+#define __pa(x) \
+ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __va(x) \
+ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
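
[note] __pa()/__va() now translate through PHYS_OFFSET as well, so KSEG no longer has to start at physical address 0. A minimal user-space sketch of the round trip, with illustrative values (PAGE_OFFSET 0xa0000000 and PHYS_OFFSET 0x10000000 are assumptions for the example, not kernel defaults):

#include <assert.h>

#define DEMO_PAGE_OFFSET	0xa0000000ul	/* plays XCHAL_KSEG_CACHED_VADDR */
#define DEMO_PHYS_OFFSET	0x10000000ul	/* plays XCHAL_KSEG_PADDR */

/* Mirror of the new __pa()/__va() definitions above. */
#define demo_pa(x)	((unsigned long)(x) - DEMO_PAGE_OFFSET + DEMO_PHYS_OFFSET)
#define demo_va(x)	((void *)((unsigned long)(x) - DEMO_PHYS_OFFSET + DEMO_PAGE_OFFSET))

int main(void)
{
	unsigned long va = 0xa0123000ul;			/* a KSEG virtual address */

	assert(demo_pa(va) == 0x10123000ul);			/* its physical counterpart */
	assert((unsigned long)demo_va(demo_pa(va)) == va);	/* round trip */
	return 0;
}
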
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index fb02fdc5ecee..8aa0e0d9cbb2 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -13,6 +13,7 @@
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
+#include <asm/kmem_layout.h>
/*
* We only use two ring levels, user and kernel space.
@@ -68,9 +69,9 @@
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
*/
-#define VMALLOC_START 0xC0000000
-#define VMALLOC_END 0xC7FEFFFF
-#define TLBTEMP_BASE_1 0xC7FF0000
+#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
+#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
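
[note] Tying VMALLOC_START to the KSEG base keeps the roughly 128 MiB vmalloc window and the TLBTEMP area directly below KSEG for every layout. Working the new expressions through the constants in kmem_layout.h: the MMUv2-compatible layout yields VMALLOC_START 0xc0000000, VMALLOC_END 0xc7feffff and TLBTEMP_BASE_1 0xc7ff0000, i.e. exactly the hard-coded values this hunk removes; the 256M layout yields 0xa0000000..0xa7feffff with TLBTEMP_BASE_1 at 0xa7ff0000; the 512M layout yields 0x90000000..0x97feffff with TLBTEMP_BASE_1 at 0x97ff0000.
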
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 32e98f27ce97..f8fbef67bc5f 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -69,4 +69,10 @@ extern int platform_pcibios_fixup (void);
*/
extern void platform_calibrate_ccount (void);
+/*
+ * Flush and reset the mmu, simulate a processor reset, and
+ * jump to the reset vector.
+ */
+void cpu_reset(void) __attribute__((noreturn));
+
#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index d2e40d39c615..b42d68bfe3cf 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -37,7 +37,7 @@
#ifdef CONFIG_MMU
#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
#else
-#define TASK_SIZE (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define TASK_SIZE __XTENSA_UL_CONST(0xffffffff)
#endif
#define STACK_TOP TASK_SIZE
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
index c015c5c8e3f7..552cdfd8590e 100644
--- a/arch/xtensa/include/asm/sysmem.h
+++ b/arch/xtensa/include/asm/sysmem.h
@@ -11,27 +11,8 @@
#ifndef _XTENSA_SYSMEM_H
#define _XTENSA_SYSMEM_H
-#define SYSMEM_BANKS_MAX 31
+#include <linux/memblock.h>
-struct meminfo {
- unsigned long start;
- unsigned long end;
-};
-
-/*
- * Bank array is sorted by .start.
- * Banks don't overlap and there's at least one page gap
- * between adjacent bank entries.
- */
-struct sysmem_info {
- int nr_banks;
- struct meminfo bank[SYSMEM_BANKS_MAX];
-};
-
-extern struct sysmem_info sysmem;
-
-int add_sysmem_bank(unsigned long start, unsigned long end);
-int mem_reserve(unsigned long, unsigned long, int);
void bootmem_init(void);
void zones_init(void);
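
[note] With the private bank array gone, physical memory registration and reservation go through the generic memblock allocator, so code that used add_sysmem_bank()/mem_reserve() switches to memblock_add()/memblock_reserve() (which take a base and a size rather than start and end addresses). A rough sketch of the replacement calls, with placeholder addresses:

#include <linux/init.h>
#include <linux/memblock.h>

/* Placeholder values: register one 128 MiB RAM bank, then reserve 8 KiB in it. */
static void __init demo_register_memory(void)
{
	memblock_add(0x00000000, 0x08000000);		/* was add_sysmem_bank(start, end) */
	memblock_reserve(0x00001000, 0x00002000);	/* was mem_reserve(start, end, ...) */
}
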
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 288c776736d3..77d41cc7a688 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -20,6 +20,7 @@
#include <variant/core.h>
#include <platform/hardware.h>
+#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
@@ -47,61 +48,42 @@ static inline unsigned long xtensa_get_kio_paddr(void)
#if defined(CONFIG_MMU)
-/* Will Become VECBASE */
-#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
-
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/* Image Virtual Start Address */
-#define KERNELOFFSET 0xD0003000
-
-#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
- /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
- #define LOAD_MEMORY_ADDRESS 0x00003000
+#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
+ CONFIG_KERNEL_LOAD_ADDRESS - \
+ XCHAL_KSEG_PADDR)
#else
- /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
- #define LOAD_MEMORY_ADDRESS 0xD0003000
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
-#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \
- XCHAL_RESET_VECTOR1_PADDR)
-
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
- /* VECBASE */
- #define VIRTUAL_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x2000)
+/* Location of the start of the kernel text, _start */
+#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
- /* Location of the start of the kernel text, _start */
- #define KERNELOFFSET (PLATFORM_DEFAULT_MEM_START + 0x3000)
-
- /* Loaded just above possibly live vectors */
- #define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
-
-#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#endif /* CONFIG_MMU */
-#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
-
-/* Used to set VECBASE register */
-#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
-#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
-#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
-#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
-#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
-#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
-#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
-#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
-#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
-#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
-
-#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
+#define VECTOR_VADDR(offset) (VECBASE_VADDR + offset)
-#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
-
-#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define USER_VECTOR_VADDR VECTOR_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR VECTOR_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR VECTOR_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR VECTOR_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL6_VECOFS)
+#define INTLEVEL7_VECTOR_VADDR VECTOR_VADDR(XCHAL_INTLEVEL7_VECOFS)
+#define DEBUG_VECTOR_VADDR VECTOR_VADDR(XCHAL_DEBUG_VECOFS)
/*
* These XCHAL_* #defines from variant/core.h
@@ -109,7 +91,6 @@ static inline unsigned long xtensa_get_kio_paddr(void)
* constants are defined above and should be used.
*/
#undef XCHAL_VECBASE_RESET_VADDR
-#undef XCHAL_RESET_VECTOR0_VADDR
#undef XCHAL_USER_VECTOR_VADDR
#undef XCHAL_KERNEL_VECTOR_VADDR
#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
@@ -119,9 +100,8 @@ static inline unsigned long xtensa_get_kio_paddr(void)
#undef XCHAL_INTLEVEL4_VECTOR_VADDR
#undef XCHAL_INTLEVEL5_VECTOR_VADDR
#undef XCHAL_INTLEVEL6_VECTOR_VADDR
-#undef XCHAL_DEBUG_VECTOR_VADDR
-#undef XCHAL_NMI_VECTOR_VADDR
#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
#else
@@ -134,6 +114,7 @@ static inline unsigned long xtensa_get_kio_paddr(void)
#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define INTLEVEL7_VECTOR_VADDR XCHAL_INTLEVEL7_VECTOR_VADDR
#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
#endif
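
[note] With VECBASE, the kernel image start and all vector addresses are now derived from CONFIG_KERNEL_LOAD_ADDRESS instead of a fixed 0xD0000000 window. A sketch checking that the new derivation reproduces the old constants for the MMUv2-compatible layout, assuming CONFIG_KERNEL_LOAD_ADDRESS = 0x00003000 and CONFIG_VECTORS_OFFSET = 0x3000 (stated here as assumptions; both are Kconfig options):

/* Demo values for the MMUv2-compatible layout with KSEG at physical 0. */
#define DEMO_KSEG_CACHED_VADDR		0xd0000000u
#define DEMO_KSEG_PADDR			0x00000000u
#define DEMO_KERNEL_LOAD_ADDRESS	0x00003000u
#define DEMO_VECTORS_OFFSET		0x00003000u

#define DEMO_KERNELOFFSET	(DEMO_KSEG_CACHED_VADDR + \
				 DEMO_KERNEL_LOAD_ADDRESS - DEMO_KSEG_PADDR)
#define DEMO_VECBASE_VADDR	(DEMO_KERNELOFFSET - DEMO_VECTORS_OFFSET)

_Static_assert(DEMO_KERNELOFFSET == 0xd0003000u,
	       "matches the old hard-coded KERNELOFFSET");
_Static_assert(DEMO_VECBASE_VADDR == 0xd0000000u,
	       "matches the old VIRTUAL_MEMORY_ADDRESS / VECBASE");
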