Diffstat (limited to 'arch/sparc')
 arch/sparc/Kconfig                      |   7
 arch/sparc/include/asm/hypervisor.h     |  12
 arch/sparc/include/asm/io_64.h          |  20
 arch/sparc/include/asm/iommu_64.h       |   7
 arch/sparc/include/asm/jump_label.h     |   5
 arch/sparc/include/asm/seccomp.h        |  11
 arch/sparc/include/asm/starfire.h       |   1
 arch/sparc/include/asm/thread_info_32.h |  15
 arch/sparc/include/asm/thread_info_64.h |  26
 arch/sparc/kernel/entry.h               |   4
 arch/sparc/kernel/hvapi.c               |   1
 arch/sparc/kernel/hvcalls.S             |  16
 arch/sparc/kernel/iommu.c               | 172
 arch/sparc/kernel/iommu_common.h        |   8
 arch/sparc/kernel/ldc.c                 | 153
 arch/sparc/kernel/leon_pci.c            |  16
 arch/sparc/kernel/mdesc.c               |  22
 arch/sparc/kernel/pci.c                 |   8
 arch/sparc/kernel/pci_sun4v.c           | 183
 arch/sparc/kernel/pcic.c                |   4
 arch/sparc/kernel/pcr.c                 |  33
 arch/sparc/kernel/perf_event.c          |  55
 arch/sparc/kernel/process_64.c          |   4
 arch/sparc/kernel/smp_64.c              |  27
 arch/sparc/kernel/starfire.c            |   5
 arch/sparc/kernel/sys_sparc_64.c        |   2
 arch/sparc/kernel/time_32.c             |   6
 arch/sparc/kernel/traps_32.c            |   1
 arch/sparc/kernel/traps_64.c            |  32
 arch/sparc/lib/memmove.S                |  35
 arch/sparc/mm/init_64.c                 |   2
 31 files changed, 429 insertions(+), 464 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 96ac69c5eba0..e49502acbab4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG
default "arch/sparc/configs/sparc32_defconfig" if SPARC32
default "arch/sparc/configs/sparc64_defconfig" if SPARC64
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+
config IOMMU_HELPER
bool
default y if SPARC64
@@ -143,6 +146,10 @@ config GENERIC_ISA_DMA
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64
+config PGTABLE_LEVELS
+ default 4 if 64BIT
+ default 3
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 4f6725ff4c33..f5b6537306f0 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
+
+#define HV_FAST_M7_GET_PERFREG 0x43
+#define HV_FAST_M7_SET_PERFREG 0x44
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
+ unsigned long *reg_val);
+unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+#endif
+
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
+#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
#define HV_GRP_N2_CPU 0x0202
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9b672be70dda..50d4840d9aeb 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr)
{
}
-#define ioread8(X) readb(X)
-#define ioread16(X) readw(X)
-#define ioread16be(X) __raw_readw(X)
-#define ioread32(X) readl(X)
-#define ioread32be(X) __raw_readl(X)
-#define iowrite8(val,X) writeb(val,X)
-#define iowrite16(val,X) writew(val,X)
-#define iowrite16be(val,X) __raw_writew(val,X)
-#define iowrite32(val,X) writel(val,X)
-#define iowrite32be(val,X) __raw_writel(val,X)
+#define ioread8 readb
+#define ioread16 readw
+#define ioread16be __raw_readw
+#define ioread32 readl
+#define ioread32be __raw_readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite16be __raw_writew
+#define iowrite32 writel
+#define iowrite32be __raw_writel
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index 2b9321ab064d..cd0d69fa7592 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -16,6 +16,7 @@
#define IOPTE_WRITE 0x0000000000000002UL
#define IOMMU_NUM_CTXS 4096
+#include <linux/iommu-common.h>
struct iommu_arena {
unsigned long *map;
@@ -24,11 +25,10 @@ struct iommu_arena {
};
struct iommu {
+ struct iommu_map_table tbl;
spinlock_t lock;
- struct iommu_arena arena;
- void (*flush_all)(struct iommu *);
+ u32 dma_addr_mask;
iopte_t *page_table;
- u32 page_table_map_base;
unsigned long iommu_control;
unsigned long iommu_tsbbase;
unsigned long iommu_flush;
@@ -40,7 +40,6 @@ struct iommu {
unsigned long dummy_page_pa;
unsigned long ctx_lowest_free;
DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
- u32 dma_addr_mask;
};
struct strbuf {
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index ec2e2e2aba7d..cc9b04a2b11b 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -1,7 +1,7 @@
#ifndef _ASM_SPARC_JUMP_LABEL_H
#define _ASM_SPARC_JUMP_LABEL_H
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -22,8 +22,6 @@ l_yes:
return true;
}
-#endif /* __KERNEL__ */
-
typedef u32 jump_label_t;
struct jump_entry {
@@ -32,4 +30,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/sparc/include/asm/seccomp.h b/arch/sparc/include/asm/seccomp.h
index adca1bce41d4..5ef8826d44f8 100644
--- a/arch/sparc/include/asm/seccomp.h
+++ b/arch/sparc/include/asm/seccomp.h
@@ -1,15 +1,10 @@
#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
#include <linux/unistd.h>
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+#include <asm-generic/seccomp.h>
+
#endif /* _ASM_SECCOMP_H */
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h
index c100dc27a0a9..176fa0ad19f1 100644
--- a/arch/sparc/include/asm/starfire.h
+++ b/arch/sparc/include/asm/starfire.h
@@ -12,7 +12,6 @@
extern int this_is_starfire;
void check_if_starfire(void);
-int starfire_hard_smp_processor_id(void);
void starfire_hookup(int);
unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fd7bd0a440ca..229475f0d7ce 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -27,7 +27,6 @@
struct thread_info {
unsigned long uwinmask;
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
@@ -35,6 +34,8 @@ struct thread_info {
int softirq_count;
int hardirq_count;
+ u32 __unused;
+
/* Context switch saved kernel state. */
unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
unsigned long kpc;
@@ -56,7 +57,6 @@ struct thread_info {
{ \
.uwinmask = 0, \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -85,12 +85,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
*/
#define TI_UWINMASK 0x00 /* uwinmask */
#define TI_TASK 0x04
-#define TI_EXECDOMAIN 0x08 /* exec_domain */
-#define TI_FLAGS 0x0c
-#define TI_CPU 0x10
-#define TI_PREEMPT 0x14 /* preempt_count */
-#define TI_SOFTIRQ 0x18 /* softirq_count */
-#define TI_HARDIRQ 0x1c /* hardirq_count */
+#define TI_FLAGS 0x08
+#define TI_CPU 0x0c
+#define TI_PREEMPT 0x10 /* preempt_count */
+#define TI_SOFTIRQ 0x14 /* softirq_count */
+#define TI_HARDIRQ 0x18 /* hardirq_count */
#define TI_KSP 0x20 /* ksp */
#define TI_KPC 0x24 /* kpc (ldd'ed with kpc) */
#define TI_KPSR 0x28 /* kpsr */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index ff455164732a..bde59825d06c 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -31,7 +31,6 @@
#include <asm/types.h>
struct task_struct;
-struct exec_domain;
struct thread_info {
/* D$ line 1 */
@@ -44,7 +43,6 @@ struct thread_info {
/* D$ line 2 */
unsigned long fault_address;
struct pt_regs *kregs;
- struct exec_domain *exec_domain;
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u8 new_child;
__u8 current_ds;
@@ -80,18 +78,17 @@ struct thread_info {
#define TI_KSP 0x00000018
#define TI_FAULT_ADDR 0x00000020
#define TI_KREGS 0x00000028
-#define TI_EXEC_DOMAIN 0x00000030
-#define TI_PRE_COUNT 0x00000038
-#define TI_NEW_CHILD 0x0000003c
-#define TI_CURRENT_DS 0x0000003d
-#define TI_CPU 0x0000003e
-#define TI_UTRAPS 0x00000040
-#define TI_REG_WINDOW 0x00000048
-#define TI_RWIN_SPTRS 0x000003c8
-#define TI_GSR 0x00000400
-#define TI_XFSR 0x00000438
-#define TI_KUNA_REGS 0x00000470
-#define TI_KUNA_INSN 0x00000478
+#define TI_PRE_COUNT 0x00000030
+#define TI_NEW_CHILD 0x00000034
+#define TI_CURRENT_DS 0x00000035
+#define TI_CPU 0x00000036
+#define TI_UTRAPS 0x00000038
+#define TI_REG_WINDOW 0x00000040
+#define TI_RWIN_SPTRS 0x000003c0
+#define TI_GSR 0x000003f8
+#define TI_XFSR 0x00000430
+#define TI_KUNA_REGS 0x00000468
+#define TI_KUNA_INSN 0x00000470
#define TI_FPREGS 0x00000480
/* We embed this in the uppermost byte of thread_info->flags */
@@ -119,7 +116,6 @@ struct thread_info {
{ \
.task = &tsk, \
.current_ds = ASI_P, \
- .exec_domain = &default_exec_domain, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index 88d322b67fac..07cc49e541f4 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs,
void do_privop(struct pt_regs *regs);
void do_privact(struct pt_regs *regs);
void do_cee(struct pt_regs *regs);
-void do_cee_tl1(struct pt_regs *regs);
-void do_dae_tl1(struct pt_regs *regs);
-void do_iae_tl1(struct pt_regs *regs);
void do_div0_tl1(struct pt_regs *regs);
-void do_fpdis_tl1(struct pt_regs *regs);
void do_fpieee_tl1(struct pt_regs *regs);
void do_fpother_tl1(struct pt_regs *regs);
void do_ill_tl1(struct pt_regs *regs);
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 5c55145bfbf0..662500fa555f 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -48,6 +48,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
+ { .group = HV_GRP_M7_PERF, },
};
static DEFINE_SPINLOCK(hvapi_lock);
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index caedf8320416..afbaba52d2f1 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
retl
nop
ENDPROC(sun4v_t5_set_perfreg)
+
+ENTRY(sun4v_m7_get_perfreg)
+ mov %o1, %o4
+ mov HV_FAST_M7_GET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ENDPROC(sun4v_m7_get_perfreg)
+
+ENTRY(sun4v_m7_set_perfreg)
+ mov HV_FAST_M7_SET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_m7_set_perfreg)
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index bfa4d0c2df42..5320689c06e9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
+#include <linux/iommu-common.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -45,8 +46,9 @@
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
-static void iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
+ struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
@@ -87,94 +89,6 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
iopte_val(*iopte) = val;
}
-/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
- * facility it must all be done in one pass while under the iommu lock.
- *
- * On sun4u platforms, we only flush the IOMMU once every time we've passed
- * over the entire page table doing allocations. Therefore we only ever advance
- * the hint and cannot backtrack it.
- */
-unsigned long iommu_range_alloc(struct device *dev,
- struct iommu *iommu,
- unsigned long npages,
- unsigned long *handle)
-{
- unsigned long n, end, start, limit, boundary_size;
- struct iommu_arena *arena = &iommu->arena;
- int pass = 0;
-
- /* This allocator was derived from x86_64's bit string search */
-
- /* Sanity check */
- if (unlikely(npages == 0)) {
- if (printk_ratelimit())
- WARN_ON(1);
- return DMA_ERROR_CODE;
- }
-
- if (handle && *handle)
- start = *handle;
- else
- start = arena->hint;
-
- limit = arena->limit;
-
- /* The case below can happen if we have a small segment appended
- * to a large, or when the previous alloc was at the very end of
- * the available space. If so, go back to the beginning and flush.
- */
- if (start >= limit) {
- start = 0;
- if (iommu->flush_all)
- iommu->flush_all(iommu);
- }
-
- again:
-
- if (dev)
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << IO_PAGE_SHIFT);
- else
- boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
-
- n = iommu_area_alloc(arena->map, limit, start, npages,
- iommu->page_table_map_base >> IO_PAGE_SHIFT,
- boundary_size >> IO_PAGE_SHIFT, 0);
- if (n == -1) {
- if (likely(pass < 1)) {
- /* First failure, rescan from the beginning. */
- start = 0;
- if (iommu->flush_all)
- iommu->flush_all(iommu);
- pass++;
- goto again;
- } else {
- /* Second failure, give up */
- return DMA_ERROR_CODE;
- }
- }
-
- end = n + npages;
-
- arena->hint = end;
-
- /* Update handle for SG allocations */
- if (handle)
- *handle = end;
-
- return n;
-}
-
-void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long entry;
-
- entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-
- bitmap_clear(arena->map, entry, npages);
-}
-
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
@@ -187,22 +101,20 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
+ iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
- if (!iommu->arena.map) {
- printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
+ iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+ if (!iommu->tbl.map)
return -ENOMEM;
- }
- memset(iommu->arena.map, 0, sz);
- iommu->arena.limit = num_tsb_entries;
+ memset(iommu->tbl.map, 0, sz);
- if (tlb_type != hypervisor)
- iommu->flush_all = iommu_flushall;
+ iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+ (tlb_type != hypervisor ? iommu_flushall : NULL),
+ false, 1, false);
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
@@ -235,18 +147,20 @@ out_free_dummy_page:
iommu->dummy_page = 0UL;
out_free_map:
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->tbl.map);
+ iommu->tbl.map = NULL;
return -ENOMEM;
}
-static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+static inline iopte_t *alloc_npages(struct device *dev,
+ struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
if (unlikely(entry == DMA_ERROR_CODE))
return NULL;
@@ -284,7 +198,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
struct dma_attrs *attrs)
{
- unsigned long flags, order, first_page;
+ unsigned long order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
@@ -306,16 +220,14 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
- *dma_addrp = (iommu->page_table_map_base +
+ *dma_addrp = (iommu->tbl.table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
@@ -336,16 +248,12 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
struct iommu *iommu;
- unsigned long flags, order, npages;
+ unsigned long order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, dvma, npages);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
order = get_order(size);
if (order < 10)
@@ -375,8 +283,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(dev, iommu, npages);
+ spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
@@ -385,7 +293,7 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
if (unlikely(!base))
goto bad;
- bus_addr = (iommu->page_table_map_base +
+ bus_addr = (iommu->tbl.table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -496,7 +404,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
@@ -515,11 +423,10 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
- iommu_range_free(iommu, bus_addr, npages);
-
iommu_free_ctx(iommu, ctx);
-
spin_unlock_irqrestore(&iommu->lock, flags);
+
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -567,7 +474,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
@@ -581,7 +488,8 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_range_alloc(dev, iommu, npages, &handle);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ &handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -594,7 +502,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->page_table_map_base +
+ dma_addr = iommu->tbl.table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
@@ -654,15 +562,17 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_range_free(iommu, vaddr, npages);
- entry = (vaddr - iommu->page_table_map_base)
+ entry = (vaddr - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
+ iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ DMA_ERROR_CODE);
+
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
@@ -684,10 +594,11 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
+ struct iommu_map_table *tbl = &iommu->tbl;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
@@ -723,9 +634,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- iommu_range_free(iommu, dma_handle, npages);
- entry = ((dma_handle - iommu->page_table_map_base)
+ entry = ((dma_handle - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
@@ -737,6 +647,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
+ iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ DMA_ERROR_CODE);
sg = sg_next(sg);
}
@@ -770,9 +682,10 @@ static void dma_4u_sync_single_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
+ struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
+ ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
@@ -805,9 +718,10 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
+ struct iommu_map_table *tbl = &iommu->tbl;
- iopte = iommu->page_table +
- ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ iopte = iommu->page_table + ((sglist[0].dma_address -
+ tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
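
Note (reading aid, not part of the diff): the iommu.c hunks above drop the private arena allocator in favour of the pooled allocator from <linux/iommu-common.h>. A minimal sketch of the allocate/convert/free pattern those hunks adopt follows; sketch_map() and sketch_unmap() are hypothetical helpers used only for illustration, and the pool locking happens inside the iommu_tbl_* helpers themselves.

/* Sketch only: assumes iommu->tbl was set up by iommu_tbl_pool_init()
 * as in iommu_table_init() above.
 */
static dma_addr_t sketch_map(struct device *dev, struct iommu *iommu,
			     unsigned long npages)
{
	unsigned long entry;

	/* No iommu->lock needed; the pools lock internally. */
	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (entry == DMA_ERROR_CODE)
		return DMA_ERROR_CODE;

	/* Entry number -> bus address, exactly as the hunks do. */
	return iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
}

static void sketch_unmap(struct iommu *iommu, dma_addr_t bus_addr,
			 unsigned long npages)
{
	/* The free sites above pass DMA_ERROR_CODE as the entry so the
	 * helper derives it from bus_addr and tbl.table_map_base.
	 */
	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
}
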
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 1ec0de4156e7..f4be0d724fc6 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -48,12 +48,4 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
-unsigned long iommu_range_alloc(struct device *dev,
- struct iommu *iommu,
- unsigned long npages,
- unsigned long *handle);
-void iommu_range_free(struct iommu *iommu,
- dma_addr_t dma_addr,
- unsigned long npages);
-
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 274a9f59d95c..d2ae0f70059e 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
+#include <linux/iommu-common.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
@@ -27,6 +28,10 @@
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
+#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
+#define COOKIE_PGSZ_CODE_SHIFT 60ULL
+
+
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE 64
@@ -98,10 +103,10 @@ static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
- /* Protects arena alloc/free. */
+ /* Protects ldc_unmap. */
spinlock_t lock;
- struct iommu_arena arena;
struct ldc_mtable_entry *page_table;
+ struct iommu_map_table iommu_map_table;
};
struct ldc_channel {
@@ -998,31 +1003,59 @@ static void free_queue(unsigned long num_entries, struct ldc_packet *q)
free_pages((unsigned long)q, order);
}
+static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
+{
+ u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
+ /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
+
+ cookie &= ~COOKIE_PGSZ_CODE;
+
+ return (cookie >> (13ULL + (szcode * 3ULL)));
+}
+
+static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
+ unsigned long entry, unsigned long npages)
+{
+ struct ldc_mtable_entry *base;
+ unsigned long i, shift;
+
+ shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
+ base = iommu->page_table + entry;
+ for (i = 0; i < npages; i++) {
+ if (base->cookie)
+ sun4v_ldc_revoke(id, cookie + (i << shift),
+ base->cookie);
+ base->mte = 0;
+ }
+}
+
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
-static int ldc_iommu_init(struct ldc_channel *lp)
+static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
- struct ldc_iommu *iommu = &lp->iommu;
+ struct ldc_iommu *ldc_iommu = &lp->iommu;
+ struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
-
- spin_lock_init(&iommu->lock);
+ spin_lock_init(&ldc_iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
+ iommu->map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
-
- iommu->arena.limit = num_tsb_entries;
+ iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
+ NULL, false /* no large pool */,
+ 1 /* npools */,
+ true /* skip span boundary check */);
order = get_order(tsbsize);
@@ -1037,7 +1070,7 @@ static int ldc_iommu_init(struct ldc_channel *lp)
memset(table, 0, PAGE_SIZE << order);
- iommu->page_table = table;
+ ldc_iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
@@ -1049,31 +1082,32 @@ static int ldc_iommu_init(struct ldc_channel *lp)
out_free_table:
free_pages((unsigned long) table, order);
- iommu->page_table = NULL;
+ ldc_iommu->page_table = NULL;
out_free_map:
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->map);
+ iommu->map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
- struct ldc_iommu *iommu = &lp->iommu;
+ struct ldc_iommu *ldc_iommu = &lp->iommu;
+ struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
- num_tsb_entries = iommu->arena.limit;
+ num_tsb_entries = iommu->poolsize * iommu->nr_pools;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
- free_pages((unsigned long) iommu->page_table, order);
- iommu->page_table = NULL;
+ free_pages((unsigned long) ldc_iommu->page_table, order);
+ ldc_iommu->page_table = NULL;
- kfree(iommu->arena.map);
- iommu->arena.map = NULL;
+ kfree(iommu->map);
+ iommu->map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
@@ -1140,7 +1174,7 @@ struct ldc_channel *ldc_alloc(unsigned long id,
lp->id = id;
- err = ldc_iommu_init(lp);
+ err = ldc_iommu_init(name, lp);
if (err)
goto out_free_ldc;
@@ -1885,40 +1919,6 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
}
EXPORT_SYMBOL(ldc_read);
-static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long n, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
- return -1;
- }
- }
- bitmap_set(arena->map, n, npages);
-
- arena->hint = end;
-
- return n;
-}
-
-#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
-#define COOKIE_PGSZ_CODE_SHIFT 60ULL
-
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
@@ -1945,23 +1945,14 @@ static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
page_offset);
}
-static u64 cookie_to_index(u64 cookie, unsigned long *shift)
-{
- u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
-
- cookie &= ~COOKIE_PGSZ_CODE;
-
- *shift = szcode * 3;
-
- return (cookie >> (13ULL + (szcode * 3ULL)));
-}
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
- entry = arena_alloc(iommu, npages);
+ entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
+ npages, NULL, (unsigned long)-1, 0);
if (unlikely(entry < 0))
return NULL;
@@ -2090,7 +2081,7 @@ int ldc_map_sg(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long i, npages, flags;
+ unsigned long i, npages;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2109,9 +2100,7 @@ int ldc_map_sg(struct ldc_channel *lp,
iommu = &lp->iommu;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2136,7 +2125,7 @@ int ldc_map_single(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long npages, pa, flags;
+ unsigned long npages, pa;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2152,9 +2141,7 @@ int ldc_map_single(struct ldc_channel *lp,
iommu = &lp->iommu;
- spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2172,35 +2159,25 @@ int ldc_map_single(struct ldc_channel *lp,
}
EXPORT_SYMBOL(ldc_map_single);
+
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long i, shift, index, npages;
- struct ldc_mtable_entry *base;
+ unsigned long npages, entry;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
- index = cookie_to_index(cookie, &shift);
- base = iommu->page_table + index;
- BUG_ON(index > arena->limit ||
- (index + npages) > arena->limit);
-
- for (i = 0; i < npages; i++) {
- if (base->cookie)
- sun4v_ldc_revoke(id, cookie + (i << shift),
- base->cookie);
- base->mte = 0;
- __clear_bit(index + i, arena->map);
- }
+ entry = ldc_cookie_to_index(cookie, iommu);
+ ldc_demap(iommu, id, cookie, entry, npages);
+ iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
- unsigned long flags;
int i;
+ unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
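
Note (reading aid, not part of the diff): in ldc_cookie_to_index() above, the top nibble of a cookie carries the page-size code and the table index is the remaining bits shifted down by 13 + 3*szcode (code 0 is 8K pages, code 1 is 64K, and so on, per pagesize_code()). A worked example with assumed values:

/* Assumed example values, illustration only. */
u64 cookie = 0x0000000000006000ULL;             /* szcode 0 (8K pages)  */
u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;  /* = 0                  */
u64 index  = (cookie & ~COOKIE_PGSZ_CODE) >> (13ULL + szcode * 3ULL);
                                                /* = 0x6000 >> 13 = 3   */
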
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 899b7203a4e4..4371f72ff025 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -34,15 +34,17 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info,
&resources);
- if (root_bus) {
- /* Setup IRQs of all devices using custom routines */
- pci_fixup_irqs(pci_common_swizzle, info->map_irq);
-
- /* Assign devices with resources */
- pci_assign_unassigned_resources();
- } else {
+ if (!root_bus) {
pci_free_resource_list(&resources);
+ return;
}
+
+ /* Setup IRQs of all devices using custom routines */
+ pci_fixup_irqs(pci_common_swizzle, info->map_irq);
+
+ /* Assign devices with resources */
+ pci_assign_unassigned_resources();
+ pci_bus_add_devices(root_bus);
}
void pcibios_fixup_bus(struct pci_bus *pbus)
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 99632a87e697..26c80e18d7b1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -130,26 +130,26 @@ static struct mdesc_mem_ops memblock_mdesc_ops = {
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
unsigned int handle_size;
+ struct mdesc_handle *hp;
+ unsigned long addr;
void *base;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
+ /*
+ * Allocation has to succeed because mdesc update would be missed
+ * and such events are not retransmitted.
+ */
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
- if (base) {
- struct mdesc_handle *hp;
- unsigned long addr;
-
- addr = (unsigned long)base;
- addr = (addr + 15UL) & ~15UL;
- hp = (struct mdesc_handle *) addr;
+ addr = (unsigned long)base;
+ addr = (addr + 15UL) & ~15UL;
+ hp = (struct mdesc_handle *) addr;
- mdesc_handle_init(hp, handle_size, base);
- return hp;
- }
+ mdesc_handle_init(hp, handle_size, base);
- return NULL;
+ return hp;
}
static void mdesc_kfree(struct mdesc_handle *hp)
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9ce5afe167ff..6f7251fd2eab 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- if (pci_claim_resource(dev, i) == 0)
- continue;
-
- pci_claim_bridge_resource(dev, i);
+ pci_claim_resource(dev, i);
}
}
@@ -677,11 +674,10 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
}
pci_of_scan_bus(pbm, node, bus);
- pci_bus_add_devices(bus);
pci_bus_register_of_sysfs(bus);
pci_claim_bus_resources(bus);
-
+ pci_bus_add_devices(bus);
return bus;
}
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 47ddbd496a1e..d2fe57dad433 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
+#include <linux/iommu-common.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -155,15 +156,13 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
- spin_lock_irqsave(&iommu->lock, flags);
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
if (unlikely(entry == DMA_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT));
+ *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
@@ -188,45 +187,46 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- /* Interrupts are disabled. */
- spin_lock(&iommu->lock);
- iommu_range_free(iommu, *dma_addrp, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
+static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
+ unsigned long npages)
+{
+ u32 devhandle = *(u32 *)demap_arg;
+ unsigned long num, flags;
+
+ local_irq_save(flags);
+ do {
+ num = pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages);
+
+ entry += num;
+ npages -= num;
+ } while (npages != 0);
+ local_irq_restore(flags);
+}
+
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
dma_addr_t dvma, struct dma_attrs *attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long flags, order, npages, entry;
+ unsigned long order, npages, entry;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, dvma, npages);
-
- do {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
+ entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
+ dma_4v_iommu_demap(&devhandle, entry, npages);
+ iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
@@ -253,15 +253,13 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- spin_lock_irqsave(&iommu->lock, flags);
- entry = iommu_range_alloc(dev, iommu, npages, NULL);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ (unsigned long)(-1), 0);
if (unlikely(entry == DMA_ERROR_CODE))
goto bad;
- bus_addr = (iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT));
+ bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
@@ -290,11 +288,7 @@ bad:
return DMA_ERROR_CODE;
iommu_map_fail:
- /* Interrupts are disabled. */
- spin_lock(&iommu->lock);
- iommu_range_free(iommu, bus_addr, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
return DMA_ERROR_CODE;
}
@@ -304,7 +298,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long flags, npages;
+ unsigned long npages;
long entry;
u32 devhandle;
@@ -321,22 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- iommu_range_free(iommu, bus_addr, npages);
-
- entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- do {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- } while (npages != 0);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
+ dma_4v_iommu_demap(&devhandle, entry, npages);
+ iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -371,14 +352,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- spin_lock_irqsave(&iommu->lock, flags);
+ local_irq_save(flags);
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+ base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -391,7 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_range_alloc(dev, iommu, npages, &handle);
+ entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ &handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -404,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
iommu_batch_new_entry(entry);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->page_table_map_base +
- (entry << IO_PAGE_SHIFT);
+ dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
@@ -451,7 +432,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(err < 0L))
goto iommu_map_failed;
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
if (outcount < incount) {
outs = sg_next(outs);
@@ -469,7 +450,8 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_range_free(iommu, vaddr, npages);
+ iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ DMA_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -477,7 +459,7 @@ iommu_map_failed:
if (s == outs)
break;
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
return 0;
}
@@ -489,7 +471,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
- unsigned long flags;
+ unsigned long flags, entry;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
@@ -498,33 +480,27 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
- spin_lock_irqsave(&iommu->lock, flags);
+ local_irq_save(flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
- unsigned long npages, entry;
+ unsigned long npages;
+ struct iommu_map_table *tbl = &iommu->tbl;
+ unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- iommu_range_free(iommu, dma_handle, npages);
-
- entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
- while (npages) {
- unsigned long num;
-
- num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
- npages);
- entry += num;
- npages -= num;
- }
-
+ entry = ((dma_handle - tbl->table_map_base) >> shift);
+ dma_4v_iommu_demap(&devhandle, entry, npages);
+ iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ DMA_ERROR_CODE);
sg = sg_next(sg);
}
- spin_unlock_irqrestore(&iommu->lock, flags);
+ local_irq_restore(flags);
}
static struct dma_map_ops sun4v_dma_ops = {
@@ -550,30 +526,33 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
}
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
- struct iommu *iommu)
+ struct iommu_map_table *iommu)
{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long i, cnt = 0;
+ struct iommu_pool *pool;
+ unsigned long i, pool_nr, cnt = 0;
u32 devhandle;
devhandle = pbm->devhandle;
- for (i = 0; i < arena->limit; i++) {
- unsigned long ret, io_attrs, ra;
-
- ret = pci_sun4v_iommu_getmap(devhandle,
- HV_PCI_TSBID(0, i),
- &io_attrs, &ra);
- if (ret == HV_EOK) {
- if (page_in_phys_avail(ra)) {
- pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, i), 1);
- } else {
- cnt++;
- __set_bit(i, arena->map);
+ for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
+ pool = &(iommu->pools[pool_nr]);
+ for (i = pool->start; i <= pool->end; i++) {
+ unsigned long ret, io_attrs, ra;
+
+ ret = pci_sun4v_iommu_getmap(devhandle,
+ HV_PCI_TSBID(0, i),
+ &io_attrs, &ra);
+ if (ret == HV_EOK) {
+ if (page_in_phys_avail(ra)) {
+ pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0,
+ i), 1);
+ } else {
+ cnt++;
+ __set_bit(i, iommu->map);
+ }
}
}
}
-
return cnt;
}
@@ -603,20 +582,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
+ iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
+ iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->tbl.map) {
printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
- iommu->arena.limit = num_tsb_entries;
-
- sz = probe_existing_entries(pbm, iommu);
+ iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+ sz = probe_existing_entries(pbm, &iommu->tbl);
if (sz)
printk("%s: Imported %lu TSB entries from OBP\n",
pbm->name, sz);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 6cc78c213c01..24384e1dc33d 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -391,12 +391,16 @@ static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
struct linux_pbm_info *pbm = &pcic->pbm;
pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
+ if (!pbm->pci_bus)
+ return;
+
#if 0 /* deadwood transplanted from sparc64 */
pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
pci_record_assignments(pbm, pbm->pci_bus);
pci_assign_unassigned(pbm, pbm->pci_bus);
pci_fixup_irq(pbm, pbm->pci_bus);
#endif
+ pci_bus_add_devices(pbm->pci_bus);
}
/*
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7e967c8018c8..eb978c77c76a 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT,
};
+static u64 m7_pcr_read(unsigned long reg_num)
+{
+ unsigned long val;
+
+ (void) sun4v_m7_get_perfreg(reg_num, &val);
+
+ return val;
+}
+
+static void m7_pcr_write(unsigned long reg_num, u64 val)
+{
+ (void) sun4v_m7_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops m7_pcr_ops = {
+ .read_pcr = m7_pcr_read,
+ .write_pcr = m7_pcr_write,
+ .read_pic = n4_pic_read,
+ .write_pic = n4_pic_write,
+ .nmi_picl_value = n4_picl_value,
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+ PCR_N4_UTRACE | PCR_N4_TOE |
+ (26 << PCR_N4_SL_SHIFT)),
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+};
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_T5_CPU;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ perf_hsvc_group = HV_GRP_M7_PERF;
+ break;
+
default:
return -ENODEV;
}
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n5_pcr_ops;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ pcr_ops = &m7_pcr_ops;
+ break;
+
default:
ret = -ENODEV;
break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 46a5e4508752..86eebfa3b158 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4,
};
+static void sparc_m7_write_pmc(int idx, u64 val)
+{
+ u64 pcr;
+
+ pcr = pcr_ops->read_pcr(idx);
+ /* ensure ov and ntc are reset */
+ pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
+
+ pcr_ops->write_pic(idx, val & 0xffffffff);
+
+ pcr_ops->write_pcr(idx, pcr);
+}
+
+static const struct sparc_pmu sparc_m7_pmu = {
+ .event_map = niagara4_event_map,
+ .cache_map = &niagara4_cache_map,
+ .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
+ .read_pmc = sparc_vt_read_pmc,
+ .write_pmc = sparc_m7_write_pmc,
+ .upper_shift = 5,
+ .lower_shift = 5,
+ .event_mask = 0x7ff,
+ .user_bit = PCR_N4_UTRACE,
+ .priv_bit = PCR_N4_STRACE,
+
+ /* We explicitly don't support hypervisor tracing. */
+ .hv_bit = 0,
+
+ .irq_bit = PCR_N4_TOE,
+ .upper_nop = 0,
+ .lower_nop = 0,
+ .flags = 0,
+ .max_hw_events = 4,
+ .num_pcrs = 4,
+ .num_pic_regs = 4,
+};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
@@ -960,6 +996,8 @@ out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}
+static void sparc_pmu_start(struct perf_event *event, int flags);
+
/* On this PMU each PIC has it's own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
- u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
- sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
- enc = perf_event_get_enc(cpuc->events[i]);
- cpuc->pcr[idx] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
- cpuc->pcr[idx] |= nop_for_index(idx);
- else
- cpuc->pcr[idx] |= event_encoding(enc, idx);
+ sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
for (i = 0; i < cpuc->n_events; i++) {
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
int i;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
}
}
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
unsigned long flags;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events)
@@ -1394,7 +1422,6 @@ nocheck:
ret = 0;
out:
- perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara4_pmu;
return true;
}
+ if (!strcmp(sparc_pmu_type, "sparc-m7")) {
+ sparc_pmu = &sparc_m7_pmu;
+ return true;
+ }
return false;
}
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 0be7bf978cb1..46a59643bb1c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
(cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
+
+ touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index da6f1a7fc4db..61139d9924ca 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
scheduler_ipi();
}
-/* This is a nop because we capture all other cpus
- * anyways when making the PROM active.
- */
+static void stop_this_cpu(void *dummy)
+{
+ prom_stopself();
+}
+
void smp_send_stop(void)
{
+ int cpu;
+
+ if (tlb_type == hypervisor) {
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled) {
+ unsigned long hv_err;
+ hv_err = sun4v_cpu_stop(cpu);
+ if (hv_err)
+ printk(KERN_ERR "sun4v_cpu_stop() "
+ "failed err=%lu\n", hv_err);
+ } else
+#endif
+ prom_stopcpu_cpuid(cpu);
+ }
+ } else
+ smp_call_function(stop_this_cpu, NULL, 0);
}
/**
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index 82281a566bb8..167fdfd9c837 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -28,11 +28,6 @@ void check_if_starfire(void)
this_is_starfire = 1;
}
-int starfire_hard_smp_processor_id(void)
-{
- return upa_readl(0x1fff40000d0UL);
-}
-
/*
* Each Starfire board has 32 registers which perform translation
* and delivery of traditional interrupt packets into the extended
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c85403d0496c..30e7ddb27a3a 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
long err;
/* No need for backward compatibility. We can start fresh... */
- if (call <= SEMCTL) {
+ if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
err = sys_semtimedop(first, ptr,
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 73825f431b0c..8caf45ee81d9 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -181,17 +181,13 @@ static struct clocksource timer_cs = {
.rating = 100,
.read = timer_cs_read,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 2,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static __init int setup_timer_cs(void)
{
timer_cs_enabled = 1;
- timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
- timer_cs.shift);
-
- return clocksource_register(&timer_cs);
+ return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
}
#ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 6fd386c5232a..4f21df7d4f13 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -433,7 +433,6 @@ void trap_init(void)
/* Force linker to barf if mismatched */
if (TI_UWINMASK != offsetof(struct thread_info, uwinmask) ||
TI_TASK != offsetof(struct thread_info, task) ||
- TI_EXECDOMAIN != offsetof(struct thread_info, exec_domain) ||
TI_FLAGS != offsetof(struct thread_info, flags) ||
TI_CPU != offsetof(struct thread_info, cpu) ||
TI_PREEMPT != offsetof(struct thread_info, preempt_count) ||
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index a27651e866e7..d21cd625c0de 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
user_instruction_dump ((unsigned int __user *) regs->tpc);
}
+ if (panic_on_oops)
+ panic("Fatal exception");
if (regs->tstate & TSTATE_PRIV)
do_exit(SIGKILL);
do_exit(SIGSEGV);
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs)
die_if_kernel("TL0: Cache Error Exception", regs);
}
-void do_cee_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Cache Error Exception", regs);
-}
-
-void do_dae_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Data Access Exception", regs);
-}
-
-void do_iae_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: Instruction Access Exception", regs);
-}
-
void do_div0_tl1(struct pt_regs *regs)
{
exception_enter();
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs)
die_if_kernel("TL1: DIV0 Exception", regs);
}
-void do_fpdis_tl1(struct pt_regs *regs)
-{
- exception_enter();
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- die_if_kernel("TL1: FPU Disabled", regs);
-}
-
void do_fpieee_tl1(struct pt_regs *regs)
{
exception_enter();
@@ -2717,8 +2691,6 @@ void __init trap_init(void)
fault_address) ||
TI_KREGS != offsetof(struct thread_info, kregs) ||
TI_UTRAPS != offsetof(struct thread_info, utraps) ||
- TI_EXEC_DOMAIN != offsetof(struct thread_info,
- exec_domain) ||
TI_REG_WINDOW != offsetof(struct thread_info,
reg_window) ||
TI_RWIN_SPTRS != offsetof(struct thread_info,
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index b7f6334e159f..857ad4f8905f 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -8,9 +8,11 @@
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
- mov %o0, %g1
+ brz,pn %o2, 99f
+ mov %o0, %g1
+
cmp %o0, %o1
- bleu,pt %xcc, memcpy
+ bleu,pt %xcc, 2f
add %o1, %o2, %g7
cmp %g7, %o0
bleu,pt %xcc, memcpy
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
-
+99:
retl
mov %g1, %o0
+
+ /* We can't just call memcpy for these memmove cases. On some
+ * chips the memcpy uses cache initializing stores and when dst
+ * and src are close enough, those can clobber the source data
+ * before we've loaded it in.
+ */
+2: or %o0, %o1, %g7
+ or %o2, %g7, %g7
+ andcc %g7, 0x7, %g0
+ bne,pn %xcc, 4f
+ nop
+
+3: ldx [%o1], %g7
+ add %o1, 8, %o1
+ subcc %o2, 8, %o2
+ add %o0, 8, %o0
+ bne,pt %icc, 3b
+ stx %g7, [%o0 - 0x8]
+ ba,a,pt %xcc, 99b
+
+4: ldub [%o1], %g7
+ add %o1, 1, %o1
+ subcc %o2, 1, %o2
+ add %o0, 1, %o0
+ bne,pt %icc, 4b
+ stb %g7, [%o0 - 0x1]
+ ba,a,pt %xcc, 99b
ENDPROC(memmove)
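
Note (reading aid, not part of the diff): as the comment in the new assembly explains, tail-calling memcpy is only safe when the regions cannot overlap, because memcpy's cache-initializing stores can clobber source bytes before they are loaded. A rough C rendering of the new forward-copy path (labels 2:, 3:, 4: above), for illustration only and not the kernel implementation:

/* Forward copy with plain loads/stores, used when dst <= src.
 * Word loop if dst, src and len are all 8-byte aligned (label 3:),
 * byte loop otherwise (label 4:).
 */
static void memmove_forward(char *dst, const char *src, unsigned long len)
{
	if ((((unsigned long)dst | (unsigned long)src | len) & 0x7) == 0) {
		while (len) {
			*(unsigned long *)dst = *(const unsigned long *)src;
			dst += 8;
			src += 8;
			len -= 8;
		}
	} else {
		while (len--)
			*dst++ = *src++;
	}
	/* The truly overlapping case (dst > src, src + len > dst) still
	 * uses the pre-existing backward byte loop at label 1:.
	 */
}
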
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3ea267c53320..4ca0d6ba5ec8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2820,7 +2820,7 @@ static int __init report_memory(void)
return 0;
}
-device_initcall(report_memory);
+arch_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range