author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc/mm
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile      |   23
-rw-r--r--  arch/sparc/mm/btfixup.c     |  336
-rw-r--r--  arch/sparc/mm/extable.c     |   77
-rw-r--r--  arch/sparc/mm/fault.c       |  596
-rw-r--r--  arch/sparc/mm/generic.c     |  154
-rw-r--r--  arch/sparc/mm/highmem.c     |  120
-rw-r--r--  arch/sparc/mm/hypersparc.S  |  413
-rw-r--r--  arch/sparc/mm/init.c        |  515
-rw-r--r--  arch/sparc/mm/io-unit.c     |  318
-rw-r--r--  arch/sparc/mm/iommu.c       |  475
-rw-r--r--  arch/sparc/mm/loadmmu.c     |   46
-rw-r--r--  arch/sparc/mm/nosrmmu.c     |   59
-rw-r--r--  arch/sparc/mm/nosun4c.c     |   77
-rw-r--r--  arch/sparc/mm/srmmu.c       | 2274
-rw-r--r--  arch/sparc/mm/sun4c.c       | 2276
-rw-r--r--  arch/sparc/mm/swift.S       |  256
-rw-r--r--  arch/sparc/mm/tsunami.S     |  133
-rw-r--r--  arch/sparc/mm/viking.S      |  284
18 files changed, 8432 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
new file mode 100644
index 000000000000..16eeba4b991a
--- /dev/null
+++ b/arch/sparc/mm/Makefile
@@ -0,0 +1,23 @@
+# $Id: Makefile,v 1.38 2000/12/15 00:41:22 davem Exp $
+# Makefile for the linux Sparc-specific parts of the memory manager.
+#
+
+EXTRA_AFLAGS := -ansi
+
+obj-y := fault.o init.o loadmmu.o generic.o extable.o btfixup.o
+
+ifeq ($(CONFIG_SUN4),y)
+obj-y += nosrmmu.o
+else
+obj-y += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
+endif
+
+ifdef CONFIG_HIGHMEM
+obj-y += highmem.o
+endif
+
+ifdef CONFIG_SMP
+obj-y += nosun4c.o
+else
+obj-y += sun4c.o
+endif
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
new file mode 100644
index 000000000000..f147a44c9780
--- /dev/null
+++ b/arch/sparc/mm/btfixup.c
@@ -0,0 +1,336 @@
+/* $Id: btfixup.c,v 1.10 2000/05/09 17:40:13 davem Exp $
+ * btfixup.c: Boot time code fixup and relocator, so that
+ * we can get rid of most indirect calls to achieve single
+ * image sun4c and srmmu kernel.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/btfixup.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+
+#define BTFIXUP_OPTIMIZE_NOP
+#define BTFIXUP_OPTIMIZE_OTHER
+
+extern char *srmmu_name;
+static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
+#ifdef CONFIG_SUN4
+static char str_sun4c[] __initdata = "sun4\n";
+#else
+static char str_sun4c[] __initdata = "sun4c\n";
+#endif
+static char str_srmmu[] __initdata = "srmmu[%s]/";
+static char str_iommu[] __initdata = "iommu\n";
+static char str_iounit[] __initdata = "io-unit\n";
+
+static int visited __initdata = 0;
+extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
+extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
+static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
+static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
+static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
+static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
+static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
+static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
+static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
+static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
+static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
+static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
+static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
+static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
+static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
+static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
+
+#ifdef BTFIXUP_OPTIMIZE_OTHER
+static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
+{
+ if (!fmangled)
+ *addr = value;
+ else {
+ unsigned int *q = (unsigned int *)q1;
+ if (*addr == 0x01000000) {
+ /* Noped */
+ *q = value;
+ } else if (addr[-1] == *q) {
+ /* Moved */
+ addr[-1] = value;
+ *q = value;
+ } else {
+ prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
+ prom_halt();
+ }
+ }
+}
+#else
+static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
+{
+ *addr = value;
+}
+#endif
+
+void __init btfixup(void)
+{
+ unsigned int *p, *q;
+ int type, count;
+ unsigned insn;
+ unsigned *addr;
+ int fmangled = 0;
+ void (*flush_cacheall)(void);
+
+ if (!visited) {
+ visited++;
+ printk(version);
+ if (ARCH_SUN4C_SUN4)
+ printk(str_sun4c);
+ else {
+ printk(str_srmmu, srmmu_name);
+ if (sparc_cpu_model == sun4d)
+ printk(str_iounit);
+ else
+ printk(str_iommu);
+ }
+ }
+ for (p = ___btfixup_start; p < ___btfixup_end; ) {
+ count = p[2];
+ q = p + 3;
+ switch (type = *(unsigned char *)p) {
+ case 'f':
+ count = p[3];
+ q = p + 4;
+ if (((p[0] & 1) || p[1])
+ && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
+ prom_printf(wrong_f, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 'b':
+ if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
+ prom_printf(wrong_b, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 's':
+ if (p[1] + 0x1000 >= 0x2000) {
+ prom_printf(wrong_s, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 'h':
+ if (p[1] & 0x3ff) {
+ prom_printf(wrong_h, p, p[1]);
+ prom_halt();
+ }
+ break;
+ case 'a':
+ if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
+ prom_printf(wrong_a, p, p[1]);
+ prom_halt();
+ }
+ break;
+ }
+ if (p[0] & 1) {
+ p[0] &= ~1;
+ while (count) {
+ fmangled = 0;
+ addr = (unsigned *)*q;
+ if (addr < _stext || addr >= _end) {
+ prom_printf(wrong, type, p);
+ prom_halt();
+ }
+ insn = *addr;
+#ifdef BTFIXUP_OPTIMIZE_OTHER
+ if (type != 'f' && q[1]) {
+ insn = *(unsigned int *)q[1];
+ if (!insn || insn == 1)
+ insn = *addr;
+ else
+ fmangled = 1;
+ }
+#endif
+ switch (type) {
+ case 'f': /* CALL */
+ if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
+ *addr = p[1];
+ break;
+ } else if (!q[1]) {
+ if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
+ *addr = (insn & 0xffc00000) | (p[1] >> 10); break;
+ } else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
+ *addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
+ } else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
+ bad_f:
+ prom_printf(insn_f, p, addr, insn, addr[1]);
+ prom_halt();
+ }
+ } else if (q[1] != 1)
+ addr[1] = q[1];
+ if (p[2] == BTFIXUPCALL_NORM) {
+ norm_f:
+ *addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
+ q[1] = 0;
+ break;
+ }
+#ifndef BTFIXUP_OPTIMIZE_NOP
+ goto norm_f;
+#else
+ if (!(addr[1] & 0x80000000)) {
+ if ((addr[1] & 0xc1c00000) != 0x01000000) /* !SETHI */
+ goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
+ } else {
+ if ((addr[1] & 0x01800000) == 0x01800000) {
+ if ((addr[1] & 0x01f80000) == 0x01e80000) {
+ /* RESTORE */
+ goto norm_f; /* It is dangerous to patch that */
+ }
+ goto bad_f;
+ }
+ if ((addr[1] & 0xffffe003) == 0x9e03e000) {
+ /* ADD %O7, XX, %o7 */
+ int displac = (addr[1] << 19);
+
+ displac = (displac >> 21) + 2;
+ *addr = (0x10800000) + (displac & 0x3fffff);
+ q[1] = addr[1];
+ addr[1] = p[2];
+ break;
+ }
+ if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
+ goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
+ if ((addr[1] & 0x3e000000) == 0x1e000000)
+ goto norm_f; /* rd is %o7. We'd better take care. */
+ }
+ if (p[2] == BTFIXUPCALL_NOP) {
+ *addr = 0x01000000;
+ q[1] = 1;
+ break;
+ }
+#ifndef BTFIXUP_OPTIMIZE_OTHER
+ goto norm_f;
+#else
+ if (addr[1] == 0x01000000) { /* NOP in the delay slot */
+ q[1] = addr[1];
+ *addr = p[2];
+ break;
+ }
+ if ((addr[1] & 0xc0000000) != 0xc0000000) {
+ /* Not a memory operation */
+ if ((addr[1] & 0x30000000) == 0x10000000) {
+ /* Ok, non-memory op with rd %oX */
+ if ((addr[1] & 0x3e000000) == 0x1c000000)
+ goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
+ if ((addr[1] & 0x3e000000) > 0x12000000 ||
+ ((addr[1] & 0x3e000000) == 0x12000000 &&
+ p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
+ ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
+ /* Nobody uses the result. We can nop it out. */
+ *addr = p[2];
+ q[1] = addr[1];
+ addr[1] = 0x01000000;
+ break;
+ }
+ if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
+ /* MOV %reg, %Ox */
+ if ((addr[1] & 0x3e000000) == 0x10000000 &&
+ (p[2] & 0x7c000) == 0x20000) {
+ /* Ok, it is call xx; mov reg, %o0 and call optimizes
+ to doing something on %o0. Patch the patch. */
+ *addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
+ q[1] = addr[1];
+ addr[1] = 0x01000000;
+ break;
+ }
+ if ((addr[1] & 0x3e000000) == 0x12000000 &&
+ p[2] == BTFIXUPCALL_STO1O0) {
+ *addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
+ q[1] = addr[1];
+ addr[1] = 0x01000000;
+ break;
+ }
+ }
+ }
+ }
+ *addr = addr[1];
+ q[1] = addr[1];
+ addr[1] = p[2];
+ break;
+#endif /* BTFIXUP_OPTIMIZE_OTHER */
+#endif /* BTFIXUP_OPTIMIZE_NOP */
+ case 'b': /* BLACKBOX */
+ /* Has to be sethi i, xx */
+ if ((insn & 0xc1c00000) != 0x01000000) {
+ prom_printf(insn_b, p, addr, insn);
+ prom_halt();
+ } else {
+ void (*do_fixup)(unsigned *);
+
+ do_fixup = (void (*)(unsigned *))p[1];
+ do_fixup(addr);
+ }
+ break;
+ case 's': /* SIMM13 */
+ /* Has to be or %g0, i, xx */
+ if ((insn & 0xc1ffe000) != 0x80102000) {
+ prom_printf(insn_s, p, addr, insn);
+ prom_halt();
+ }
+ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
+ break;
+ case 'h': /* SETHI */
+ /* Has to be sethi i, xx */
+ if ((insn & 0xc1c00000) != 0x01000000) {
+ prom_printf(insn_h, p, addr, insn);
+ prom_halt();
+ }
+ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+ break;
+ case 'a': /* HALF */
+ /* Has to be sethi i, xx or or %g0, i, xx */
+ if ((insn & 0xc1c00000) != 0x01000000 &&
+ (insn & 0xc1ffe000) != 0x80102000) {
+ prom_printf(insn_a, p, addr, insn);
+ prom_halt();
+ }
+ if (p[1] & 0x3ff)
+ set_addr(addr, q[1], fmangled,
+ (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
+ else
+ set_addr(addr, q[1], fmangled,
+ (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
+ break;
+ case 'i': /* INT */
+ if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
+ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+ else if ((insn & 0x80002000) == 0x80002000 &&
+ (insn & 0x01800000) != 0x01800000) /* %LO */
+ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
+ else {
+ prom_printf(insn_i, p, addr, insn);
+ prom_halt();
+ }
+ break;
+ }
+ count -= 2;
+ q += 2;
+ }
+ } else
+ p = q + count;
+ }
+#ifdef CONFIG_SMP
+ flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
+#else
+ flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
+#endif
+ if (!flush_cacheall) {
+ prom_printf(fca_und);
+ prom_halt();
+ }
+ (*flush_cacheall)();
+}
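
The fixup arithmetic in btfixup() above repeats one idiom: a 32-bit value is spliced into a SETHI instruction as its upper 22 bits (val >> 10) and into an OR/%lo immediate as its low 10 bits (val & 0x3ff). Below is a minimal standalone sketch of that split, not part of the patch; the two sample opcodes (sethi 0, %g1 and or %g1, 0, %g1) are illustrative values chosen for this note, not taken from the kernel image.

/* Standalone sketch: how a 32-bit value is split across a SETHI/%lo pair,
 * mirroring "(insn & 0xffc00000) | (val >> 10)" and
 * "(insn & 0xffffe000) | (val & 0x3ff)" in btfixup() above.
 */
#include <stdio.h>
#include <stdint.h>

/* Patch the imm22 field of a SETHI instruction with %hi(val). */
static uint32_t patch_sethi(uint32_t insn, uint32_t val)
{
	return (insn & 0xffc00000u) | (val >> 10);
}

/* Patch the simm13 field of an "or rX, %lo(val), rY" instruction. */
static uint32_t patch_or_lo(uint32_t insn, uint32_t val)
{
	return (insn & 0xffffe000u) | (val & 0x3ffu);
}

int main(void)
{
	uint32_t val   = 0xf0123456u;	/* value to splice in */
	uint32_t sethi = 0x03000000u;	/* sethi 0, %g1 (imm22 = 0), illustrative */
	uint32_t orlo  = 0x82106000u;	/* or %g1, 0, %g1 (simm13 = 0), illustrative */

	printf("sethi patched: %08x\n", patch_sethi(sethi, val));
	printf("or    patched: %08x\n", patch_or_lo(orlo, val));
	/* The register written by the patched pair ends up holding
	 * (val & ~0x3ff) | (val & 0x3ff), i.e. the original value. */
	return 0;
}
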
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
new file mode 100644
index 000000000000..c9845c71f426
--- /dev/null
+++ b/arch/sparc/mm/extable.c
@@ -0,0 +1,77 @@
+/*
+ * linux/arch/sparc/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+}
+
+/* Caller knows they are in a range if ret->fixup == 0 */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *start,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ const struct exception_table_entry *walk;
+
+ /* Single insn entries are encoded as:
+ * word 1: insn address
+ * word 2: fixup code address
+ *
+ * Range entries are encoded as:
+ * word 1: first insn address
+ * word 2: 0
+ * word 3: last insn address + 4 bytes
+ * word 4: fixup code address
+ *
+ * See asm/uaccess.h for more details.
+ */
+
+ /* 1. Try to find an exact match. */
+ for (walk = start; walk <= last; walk++) {
+ if (walk->fixup == 0) {
+ /* A range entry, skip both parts. */
+ walk++;
+ continue;
+ }
+
+ if (walk->insn == value)
+ return walk;
+ }
+
+ /* 2. Try to find a range match. */
+ for (walk = start; walk <= (last - 1); walk++) {
+ if (walk->fixup)
+ continue;
+
+ if (walk[0].insn <= value && walk[1].insn > value)
+ return walk;
+
+ walk++;
+ }
+
+ return NULL;
+}
+
+/* Special extable search, which handles ranges. Returns fixup */
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
+{
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(addr);
+ if (!entry)
+ return 0;
+
+ /* Inside range? Fix g2 and return correct fixup */
+ if (!entry->fixup) {
+ *g2 = (addr - entry->insn) / 4;
+ return (entry + 1)->fixup;
+ }
+
+ return entry->fixup;
+}
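
search_extable() and search_extables_range() above depend on the two entry layouts described in the comment: a single-instruction entry is {insn, fixup}, while a range entry spends two consecutive slots, {first_insn, 0} followed by {last_insn + 4, fixup}. The following user-space sketch of the same lookup uses an invented table and struct, not the kernel's, to make the range handling and the g2 computation concrete.

/* Sketch of the lookup logic in search_extables_range() above.
 * The struct and the addresses in the table are illustrative only.
 */
#include <stdio.h>

struct extable_entry {
	unsigned long insn;	/* faulting insn address (or range start) */
	unsigned long fixup;	/* fixup address, or 0 => range entry */
};

static const struct extable_entry table[] = {
	{ 0x1000, 0x9000 },	/* single-insn entry at 0x1000 */
	{ 0x2000, 0x0000 },	/* range entry: first insn, fixup==0 marker */
	{ 0x2040, 0x9100 },	/* range entry: last insn + 4, fixup address */
};

/* Returns the fixup address; for range hits also sets *g2. */
static unsigned long lookup(unsigned long addr, unsigned long *g2)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].fixup == 0) {
			/* Range entry: occupies this slot and the next. */
			if (addr >= table[i].insn && addr < table[i + 1].insn) {
				*g2 = (addr - table[i].insn) / 4;
				return table[i + 1].fixup;
			}
			i++;	/* skip the second slot of the range */
		} else if (table[i].insn == addr) {
			return table[i].fixup;	/* single-insn exact match */
		}
	}
	return 0;
}

int main(void)
{
	unsigned long g2 = 0;
	unsigned long fix = lookup(0x2010, &g2);

	printf("fixup=%#lx g2=%lu\n", fix, g2);	/* prints fixup=0x9100 g2=4 */
	return 0;
}
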
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
new file mode 100644
index 000000000000..37f4107bae66
--- /dev/null
+++ b/arch/sparc/mm/fault.c
@@ -0,0 +1,596 @@
+/* $Id: fault.c,v 1.122 2001/11/17 07:19:26 davem Exp $
+ * fault.c: Page fault handlers for the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/head.h>
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/memreg.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+#include <asm/traps.h>
+#include <asm/kdebug.h>
+#include <asm/uaccess.h>
+
+#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
+
+extern int prom_node_root;
+
+/* At boot time we determine these two values necessary for setting
+ * up the segment maps and page table entries (pte's).
+ */
+
+int num_segmaps, num_contexts;
+int invalid_segment;
+
+/* various Virtual Address Cache parameters we find at boot time... */
+
+int vac_size, vac_linesize, vac_do_hw_vac_flushes;
+int vac_entries_per_context, vac_entries_per_segment;
+int vac_entries_per_page;
+
+/* Nice, simple, prom library does all the sweating for us. ;) */
+int prom_probe_memory (void)
+{
+ register struct linux_mlist_v0 *mlist;
+ register unsigned long bytes, base_paddr, tally;
+ register int i;
+
+ i = 0;
+ mlist= *prom_meminfo()->v0_available;
+ bytes = tally = mlist->num_bytes;
+ base_paddr = (unsigned long) mlist->start_adr;
+
+ sp_banks[0].base_addr = base_paddr;
+ sp_banks[0].num_bytes = bytes;
+
+ while (mlist->theres_more != (void *) 0){
+ i++;
+ mlist = mlist->theres_more;
+ bytes = mlist->num_bytes;
+ tally += bytes;
+ if (i > SPARC_PHYS_BANKS-1) {
+ printk ("The machine has more banks than "
+ "this kernel can support\n"
+ "Increase the SPARC_PHYS_BANKS "
+ "setting (currently %d)\n",
+ SPARC_PHYS_BANKS);
+ i = SPARC_PHYS_BANKS-1;
+ break;
+ }
+
+ sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
+ sp_banks[i].num_bytes = mlist->num_bytes;
+ }
+
+ i++;
+ sp_banks[i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+
+ /* Now mask all bank sizes on a page boundary, it is all we can
+ * use anyways.
+ */
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ sp_banks[i].num_bytes &= PAGE_MASK;
+
+ return tally;
+}
+
+/* Traverse the memory lists in the prom to see how much physical we
+ * have.
+ */
+unsigned long
+probe_memory(void)
+{
+ int total;
+
+ total = prom_probe_memory();
+
+ /* Oh man, much nicer, keep the dirt in promlib. */
+ return total;
+}
+
+extern void sun4c_complete_all_stores(void);
+
+/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
+asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
+ unsigned long svaddr, unsigned long aerr,
+ unsigned long avaddr)
+{
+ sun4c_complete_all_stores();
+ printk("FAULT: NMI received\n");
+ printk("SREGS: Synchronous Error %08lx\n", serr);
+ printk(" Synchronous Vaddr %08lx\n", svaddr);
+ printk(" Asynchronous Error %08lx\n", aerr);
+ printk(" Asynchronous Vaddr %08lx\n", avaddr);
+ if (sun4c_memerr_reg)
+ printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg);
+ printk("REGISTER DUMP:\n");
+ show_regs(regs);
+ prom_halt();
+}
+
+static void unhandled_fault(unsigned long, struct task_struct *,
+ struct pt_regs *) __attribute__ ((noreturn));
+
+static void unhandled_fault(unsigned long address, struct task_struct *tsk,
+ struct pt_regs *regs)
+{
+ if((unsigned long) address < PAGE_SIZE) {
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference\n");
+ } else {
+ printk(KERN_ALERT "Unable to handle kernel paging request "
+ "at virtual address %08lx\n", address);
+ }
+ printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
+ (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
+ printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
+ (tsk->mm ? (unsigned long) tsk->mm->pgd :
+ (unsigned long) tsk->active_mm->pgd));
+ die_if_kernel("Oops", regs);
+}
+
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
+ unsigned long address)
+{
+ struct pt_regs regs;
+ unsigned long g2;
+ unsigned int insn;
+ int i;
+
+ i = search_extables_range(ret_pc, &g2);
+ switch (i) {
+ case 3:
+ /* load & store will be handled by fixup */
+ return 3;
+
+ case 1:
+ /* store will be handled by fixup, load will bump out */
+ /* for _to_ macros */
+ insn = *((unsigned int *) pc);
+ if ((insn >> 21) & 1)
+ return 1;
+ break;
+
+ case 2:
+ /* load will be handled by fixup, store will bump out */
+ /* for _from_ macros */
+ insn = *((unsigned int *) pc);
+ if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
+ return 2;
+ break;
+
+ default:
+ break;
+ };
+
+ memset(&regs, 0, sizeof (regs));
+ regs.pc = pc;
+ regs.npc = pc + 4;
+ __asm__ __volatile__(
+ "rd %%psr, %0\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n" : "=r" (regs.psr));
+ unhandled_fault(address, current, &regs);
+
+ /* Not reached */
+ return 0;
+}
+
+extern unsigned long safe_compute_effective_address(struct pt_regs *,
+ unsigned int);
+
+static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+{
+ unsigned int insn;
+
+ if (text_fault)
+ return regs->pc;
+
+ if (regs->psr & PSR_PS) {
+ insn = *(unsigned int *) regs->pc;
+ } else {
+ __get_user(insn, (unsigned int *) regs->pc);
+ }
+
+ return safe_compute_effective_address(regs, insn);
+}
+
+asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ unsigned int fixup;
+ unsigned long g2;
+ siginfo_t info;
+ int from_user = !(regs->psr & PSR_PS);
+
+ if(text_fault)
+ address = regs->pc;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (!ARCH_SUN4C_SUN4 && address >= TASK_SIZE)
+ goto vmalloc_fault;
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+
+ /*
+ * The kernel referencing a bad kernel pointer can lock up
+ * a sun4c machine completely, so we must attempt recovery.
+ */
+ if(!from_user && address >= PAGE_OFFSET)
+ goto bad_area;
+
+ vma = find_vma(mm, address);
+ if(!vma)
+ goto bad_area;
+ if(vma->vm_start <= address)
+ goto good_area;
+ if(!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if(expand_stack(vma, address))
+ goto bad_area;
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ if(write) {
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case VM_FAULT_SIGBUS:
+ goto do_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ case VM_FAULT_MINOR:
+ default:
+ current->min_flt++;
+ break;
+ }
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if(from_user) {
+#if 0
+ printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
+ tsk->comm, tsk->pid, address, regs->pc);
+#endif
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code set above to make clear whether
+ this was a SEGV_MAPERR or SEGV_ACCERR fault. */
+ info.si_addr = (void __user *)compute_si_addr(regs, text_fault);
+ info.si_trapno = 0;
+ force_sig_info (SIGSEGV, &info, tsk);
+ return;
+ }
+
+ /* Is this in ex_table? */
+no_context:
+ g2 = regs->u_regs[UREG_G2];
+ if (!from_user && (fixup = search_extables_range(regs->pc, &g2))) {
+ if (fixup > 10) { /* Values below are reserved for other things */
+ extern const unsigned __memset_start[];
+ extern const unsigned __memset_end[];
+ extern const unsigned __csum_partial_copy_start[];
+ extern const unsigned __csum_partial_copy_end[];
+
+#ifdef DEBUG_EXCEPTIONS
+ printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
+ printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
+ regs->pc, fixup, g2);
+#endif
+ if ((regs->pc >= (unsigned long)__memset_start &&
+ regs->pc < (unsigned long)__memset_end) ||
+ (regs->pc >= (unsigned long)__csum_partial_copy_start &&
+ regs->pc < (unsigned long)__csum_partial_copy_end)) {
+ regs->u_regs[UREG_I4] = address;
+ regs->u_regs[UREG_I5] = regs->pc;
+ }
+ regs->u_regs[UREG_G2] = g2;
+ regs->pc = fixup;
+ regs->npc = regs->pc + 4;
+ return;
+ }
+ }
+
+ unhandled_fault (address, tsk, regs);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", tsk->comm);
+ if (from_user)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *) compute_si_addr(regs, text_fault);
+ info.si_trapno = 0;
+ force_sig_info (SIGBUS, &info, tsk);
+ if (!from_user)
+ goto no_context;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = tsk->active_mm->pgd + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ if (!pgd_present(*pgd)) {
+ if (!pgd_present(*pgd_k))
+ goto bad_area_nosemaphore;
+ pgd_val(*pgd) = pgd_val(*pgd_k);
+ return;
+ }
+
+ pmd = pmd_offset(pgd, address);
+ pmd_k = pmd_offset(pgd_k, address);
+
+ if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_area_nosemaphore;
+ *pmd = *pmd_k;
+ return;
+ }
+}
+
+asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+{
+ extern void sun4c_update_mmu_cache(struct vm_area_struct *,
+ unsigned long,pte_t);
+ extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ pgd_t *pgdp;
+ pte_t *ptep;
+
+ if (text_fault) {
+ address = regs->pc;
+ } else if (!write &&
+ !(regs->psr & PSR_PS)) {
+ unsigned int insn, __user *ip;
+
+ ip = (unsigned int __user *)regs->pc;
+ if (!get_user(insn, ip)) {
+ if ((insn & 0xc1680000) == 0xc0680000)
+ write = 1;
+ }
+ }
+
+ if (!mm) {
+ /* We are oopsing. */
+ do_sparc_fault(regs, text_fault, write, address);
+ BUG(); /* P3 Oops already, you bitch */
+ }
+
+ pgdp = pgd_offset(mm, address);
+ ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
+
+ if (pgd_val(*pgdp)) {
+ if (write) {
+ if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
+ == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
+ unsigned long flags;
+
+ *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
+ _SUN4C_PAGE_MODIFIED |
+ _SUN4C_PAGE_VALID |
+ _SUN4C_PAGE_DIRTY);
+
+ local_irq_save(flags);
+ if (sun4c_get_segmap(address) != invalid_segment) {
+ sun4c_put_pte(address, pte_val(*ptep));
+ local_irq_restore(flags);
+ return;
+ }
+ local_irq_restore(flags);
+ }
+ } else {
+ if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
+ == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
+ unsigned long flags;
+
+ *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
+ _SUN4C_PAGE_VALID);
+
+ local_irq_save(flags);
+ if (sun4c_get_segmap(address) != invalid_segment) {
+ sun4c_put_pte(address, pte_val(*ptep));
+ local_irq_restore(flags);
+ return;
+ }
+ local_irq_restore(flags);
+ }
+ }
+ }
+
+ /* This conditional is 'interesting'. */
+ if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
+ && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
+ /* Note: It is safe to not grab the MMAP semaphore here because
+ * we know that update_mmu_cache() will not sleep for
+ * any reason (at least not in the current implementation)
+ * and therefore there is no danger of another thread getting
+ * on the CPU and doing a shrink_mmap() on this vma.
+ */
+ sun4c_update_mmu_cache (find_vma(current->mm, address), address,
+ *ptep);
+ else
+ do_sparc_fault(regs, text_fault, write, address);
+}
+
+/* This always deals with user addresses. */
+inline void force_user_fault(unsigned long address, int write)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ siginfo_t info;
+
+ info.si_code = SEGV_MAPERR;
+
+#if 0
+ printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
+ tsk->pid, write, address);
+#endif
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if(!vma)
+ goto bad_area;
+ if(vma->vm_start <= address)
+ goto good_area;
+ if(!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if(expand_stack(vma, address))
+ goto bad_area;
+good_area:
+ info.si_code = SEGV_ACCERR;
+ if(write) {
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case VM_FAULT_SIGBUS:
+ case VM_FAULT_OOM:
+ goto do_sigbus;
+ }
+ up_read(&mm->mmap_sem);
+ return;
+bad_area:
+ up_read(&mm->mmap_sem);
+#if 0
+ printk("Window whee %s [%d]: segfaults at %08lx\n",
+ tsk->comm, tsk->pid, address);
+#endif
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code set above to make clear whether
+ this was a SEGV_MAPERR or SEGV_ACCERR fault. */
+ info.si_addr = (void __user *) address;
+ info.si_trapno = 0;
+ force_sig_info (SIGSEGV, &info, tsk);
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *) address;
+ info.si_trapno = 0;
+ force_sig_info (SIGBUS, &info, tsk);
+}
+
+void window_overflow_fault(void)
+{
+ unsigned long sp;
+
+ sp = current_thread_info()->rwbuf_stkptrs[0];
+ if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+ force_user_fault(sp + 0x38, 1);
+ force_user_fault(sp, 1);
+}
+
+void window_underflow_fault(unsigned long sp)
+{
+ if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+ force_user_fault(sp + 0x38, 0);
+ force_user_fault(sp, 0);
+}
+
+void window_ret_fault(struct pt_regs *regs)
+{
+ unsigned long sp;
+
+ sp = regs->u_regs[UREG_FP];
+ if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+ force_user_fault(sp + 0x38, 0);
+ force_user_fault(sp, 0);
+}
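
The window fault handlers above all apply the same test: if ((sp + 0x38) & PAGE_MASK) differs from (sp & PAGE_MASK), the register window save area starting at %sp spills onto a second page, and both pages have to be faulted in. A tiny sketch of that check, assuming a 4K page size purely for illustration:

/* Sketch of the page-crossing test used by window_overflow_fault() and
 * friends above.  PAGE_SIZE is assumed to be 4K here for the example.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Non-zero if the tail of the window save area (sp + 0x38) sits on a
 * different page than sp itself. */
static int window_crosses_page(unsigned long sp)
{
	return ((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK);
}

int main(void)
{
	printf("%d\n", window_crosses_page(0x2000));	/* 0: whole window on one page */
	printf("%d\n", window_crosses_page(0x2fd0));	/* 1: 0x2fd0 + 0x38 = 0x3008 */
	return 0;
}
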
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
new file mode 100644
index 000000000000..db27eee3bda1
--- /dev/null
+++ b/arch/sparc/mm/generic.c
@@ -0,0 +1,154 @@
+/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
+ * generic.c: Generic Sparc mm routines that are not dependent upon
+ * MMU type but are Sparc specific.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static inline void forget_pte(pte_t page)
+{
+#if 0 /* old 2.4 code */
+ if (pte_none(page))
+ return;
+ if (pte_present(page)) {
+ unsigned long pfn = pte_pfn(page);
+ struct page *ptpage;
+ if (!pfn_valid(pfn))
+ return;
+ ptpage = pfn_to_page(pfn);
+ if (PageReserved(ptpage))
+ return;
+ page_cache_release(ptpage);
+ return;
+ }
+ swap_free(pte_to_swp_entry(page));
+#else
+ if (!pte_none(page)) {
+ printk("forget_pte: old mapping existed!\n");
+ BUG();
+ }
+#endif
+}
+
+/* Remap IO memory, the same way as remap_pfn_range(), but use
+ * the obio memory space.
+ *
+ * They use a pgprot that sets PAGE_IO and does not check the
+ * mem_map table as this is independent of normal memory.
+ */
+static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t oldpage = *pte;
+ pte_clear(mm, address, pte);
+ set_pte(pte, mk_pte_io(offset, prot, space));
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+}
+
+static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ offset -= address;
+ do {
+ pte_t * pte = pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = vma->vm_mm;
+
+ prot = __pgprot(pg_iobits);
+ offset -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+
+ spin_lock(&mm->page_table_lock);
+ while (from < end) {
+ pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ flush_tlb_range(vma, beg, end);
+ return error;
+}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = vma->vm_mm;
+ int space = GET_IOSPACE(pfn);
+ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+
+ prot = __pgprot(pg_iobits);
+ offset -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+
+ spin_lock(&mm->page_table_lock);
+ while (from < end) {
+ pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ flush_tlb_range(vma, beg, end);
+ return error;
+}
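
io_remap_pfn_range() above is intended to be called from a driver's mmap() handler; the pfn argument carries the IO space number in its high bits, which GET_IOSPACE()/GET_PFN() split apart. A hypothetical caller might look like the sketch below. mydev_mmap and MYDEV_IO_PFN are invented names for this illustration, and only the io_remap_pfn_range() signature comes from the code above; the snippet assumes the usual kernel build environment.

/* Hypothetical driver mmap() handler using io_remap_pfn_range() above. */
#include <linux/fs.h>
#include <linux/mm.h>

/* Invented value: IO space number in the high bits, frame number below. */
#define MYDEV_IO_PFN	0x10001000UL

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* io_remap_pfn_range() splits the space/pfn encoding itself and
	 * substitutes pg_iobits for the protection, as shown above. */
	return io_remap_pfn_range(vma, vma->vm_start, MYDEV_IO_PFN,
				  size, vma->vm_page_prot);
}
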
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
new file mode 100644
index 000000000000..4d8ed9c65182
--- /dev/null
+++ b/arch/sparc/mm/highmem.c
@@ -0,0 +1,120 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * Provides kernel-static versions of atomic kmap functions originally
+ * found as inlines in include/asm-sparc/highmem.h. These became
+ * needed as kmap_atomic() and kunmap_atomic() started getting
+ * called from within modules.
+ * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
+ *
+ * But kmap_atomic() and kunmap_atomic() cannot be inlined in
+ * modules because they are loaded with btfixup-ped functions.
+ */
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need it.
+ *
+ * XXX This is an old text. Actually, it's good to use atomic kmaps,
+ * provided you remember that they are atomic and not try to sleep
+ * with a kmap taken, much like a spinlock. Non-atomic kmaps are
+ * shared by CPUs, and so precious, and establishing them requires IPI.
+ * Atomic kmaps are lightweight and we may have NCPUS more of them.
+ */
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+ unsigned long idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ inc_preempt_count();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+/* XXX Fix - Anton */
+#if 0
+ __flush_cache_one(vaddr);
+#else
+ flush_cache_all();
+#endif
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+ set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+/* XXX Fix - Anton */
+#if 0
+ __flush_tlb_one(vaddr);
+#else
+ flush_tlb_all();
+#endif
+
+ return (void*) vaddr;
+}
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < FIXADDR_START) { // FIXME
+ dec_preempt_count();
+ preempt_check_resched();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+/* XXX Fix - Anton */
+#if 0
+ __flush_cache_one(vaddr);
+#else
+ flush_cache_all();
+#endif
+
+ /*
+ * force other mappings to Oops if they'll try to access
+ * this pte without first remap it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+/* XXX Fix - Anton */
+#if 0
+ __flush_tlb_one(vaddr);
+#else
+ flush_tlb_all();
+#endif
+#endif
+
+ dec_preempt_count();
+ preempt_check_resched();
+}
+
+/* We may be fed a pagetable here by ptep_to_xxx and others. */
+struct page *kmap_atomic_to_page(void *ptr)
+{
+ unsigned long idx, vaddr = (unsigned long)ptr;
+ pte_t *pte;
+
+ if (vaddr < SRMMU_NOCACHE_VADDR)
+ return virt_to_page(ptr);
+ if (vaddr < PKMAP_BASE)
+ return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
+ BUG_ON(vaddr < FIXADDR_START);
+ BUG_ON(vaddr > FIXADDR_TOP);
+
+ idx = virt_to_fix(vaddr);
+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+ return pte_page(*pte);
+}
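
kmap_atomic() and kunmap_atomic() above bump and drop the preempt count around a per-CPU fixmap slot, so a caller must not sleep between the two. A short sketch of the usual calling pattern for this kernel era follows; copy_word_from_page is an invented helper, while the kmap_atomic()/kunmap_atomic() signatures come from the code above and KM_USER0 is the conventional slot for user-context callers.

/* Sketch of a typical kmap_atomic()/kunmap_atomic() caller. */
#include <linux/highmem.h>

static unsigned long copy_word_from_page(struct page *page, unsigned long offset)
{
	unsigned long *vaddr, val;

	/* Takes a per-CPU fixmap slot and raises the preempt count, so
	 * no sleeping until the matching kunmap_atomic(). */
	vaddr = kmap_atomic(page, KM_USER0);
	val = vaddr[offset / sizeof(unsigned long)];
	kunmap_atomic(vaddr, KM_USER0);

	return val;
}
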
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
new file mode 100644
index 000000000000..54b8e764b042
--- /dev/null
+++ b/arch/sparc/mm/hypersparc.S
@@ -0,0 +1,413 @@
+/* $Id: hypersparc.S,v 1.18 2001/12/21 04:56:15 davem Exp $
+ * hypersparc.S: High speed Hypersparc mmu/cache operations.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asm_offsets.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <linux/config.h>
+#include <linux/init.h>
+
+ .text
+ .align 4
+
+ .globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm
+ .globl hypersparc_flush_cache_range, hypersparc_flush_cache_page
+ .globl hypersparc_flush_page_to_ram
+ .globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
+ .globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
+ .globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page
+
+hypersparc_flush_cache_all:
+ WINDOW_FLUSH(%g4, %g5)
+ sethi %hi(vac_cache_size), %g4
+ ld [%g4 + %lo(vac_cache_size)], %g5
+ sethi %hi(vac_line_size), %g1
+ ld [%g1 + %lo(vac_line_size)], %g2
+1:
+ subcc %g5, %g2, %g5 ! hyper_flush_unconditional_combined
+ bne 1b
+ sta %g0, [%g5] ASI_M_FLUSH_CTX
+ retl
+ sta %g0, [%g0] ASI_M_FLUSH_IWHOLE ! hyper_flush_whole_icache
+
+ /* We expand the window flush to get maximum performance. */
+hypersparc_flush_cache_mm:
+#ifndef CONFIG_SMP
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+ be hypersparc_flush_cache_mm_out
+#endif
+ WINDOW_FLUSH(%g4, %g5)
+
+ sethi %hi(vac_line_size), %g1
+ ld [%g1 + %lo(vac_line_size)], %o1
+ sethi %hi(vac_cache_size), %g2
+ ld [%g2 + %lo(vac_cache_size)], %o0
+ add %o1, %o1, %g1
+ add %o1, %g1, %g2
+ add %o1, %g2, %g3
+ add %o1, %g3, %g4
+ add %o1, %g4, %g5
+ add %o1, %g5, %o4
+ add %o1, %o4, %o5
+
+ /* BLAMMO! */
+1:
+ subcc %o0, %o5, %o0 ! hyper_flush_cache_user
+ sta %g0, [%o0 + %g0] ASI_M_FLUSH_USER
+ sta %g0, [%o0 + %o1] ASI_M_FLUSH_USER
+ sta %g0, [%o0 + %g1] ASI_M_FLUSH_USER
+ sta %g0, [%o0 + %g2] ASI_M_FLUSH_USER
+ sta %g0, [%o0 + %g3] ASI_M_FLUSH_USER
+ sta %g0, [%o0 + %g4] ASI_M_FLUSH_USER
+ sta %g0, [%o0 + %g5] ASI_M_FLUSH_USER
+ bne 1b
+ sta %g0, [%o0 + %o4] ASI_M_FLUSH_USER
+hypersparc_flush_cache_mm_out:
+ retl
+ nop
+
+ /* The things we do for performance... */
+hypersparc_flush_cache_range:
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+#ifndef CONFIG_SMP
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+ be hypersparc_flush_cache_range_out
+#endif
+ WINDOW_FLUSH(%g4, %g5)
+
+ sethi %hi(vac_line_size), %g1
+ ld [%g1 + %lo(vac_line_size)], %o4
+ sethi %hi(vac_cache_size), %g2
+ ld [%g2 + %lo(vac_cache_size)], %o3
+
+ /* Here comes the fun part... */
+ add %o2, (PAGE_SIZE - 1), %o2
+ andn %o1, (PAGE_SIZE - 1), %o1
+ add %o4, %o4, %o5
+ andn %o2, (PAGE_SIZE - 1), %o2
+ add %o4, %o5, %g1
+ sub %o2, %o1, %g4
+ add %o4, %g1, %g2
+ sll %o3, 2, %g5
+ add %o4, %g2, %g3
+ cmp %g4, %g5
+ add %o4, %g3, %g4
+ blu 0f
+ add %o4, %g4, %g5
+ add %o4, %g5, %g7
+
+ /* Flush entire user space, believe it or not this is quicker
+ * than page at a time flushings for range > (cache_size<<2).
+ */
+1:
+ subcc %o3, %g7, %o3
+ sta %g0, [%o3 + %g0] ASI_M_FLUSH_USER
+ sta %g0, [%o3 + %o4] ASI_M_FLUSH_USER
+ sta %g0, [%o3 + %o5] ASI_M_FLUSH_USER
+ sta %g0, [%o3 + %g1] ASI_M_FLUSH_USER
+ sta %g0, [%o3 + %g2] ASI_M_FLUSH_USER
+ sta %g0, [%o3 + %g3] ASI_M_FLUSH_USER
+ sta %g0, [%o3 + %g4] ASI_M_FLUSH_USER
+ bne 1b
+ sta %g0, [%o3 + %g5] ASI_M_FLUSH_USER
+ retl
+ nop
+
+ /* Below our threshold, flush one page at a time. */
+0:
+ ld [%o0 + AOFF_mm_context], %o0
+ mov SRMMU_CTX_REG, %g7
+ lda [%g7] ASI_M_MMUREGS, %o3
+ sta %o0, [%g7] ASI_M_MMUREGS
+ add %o2, -PAGE_SIZE, %o0
+1:
+ or %o0, 0x400, %g7
+ lda [%g7] ASI_M_FLUSH_PROBE, %g7
+ orcc %g7, 0, %g0
+ be,a 3f
+ mov %o0, %o2
+ add %o4, %g5, %g7
+2:
+ sub %o2, %g7, %o2
+ sta %g0, [%o2 + %g0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o2 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o2 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o2 + %g1] ASI_M_FLUSH_PAGE
+ sta %g0, [%o2 + %g2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o2 + %g3] ASI_M_FLUSH_PAGE
+ andcc %o2, 0xffc, %g0
+ sta %g0, [%o2 + %g4] ASI_M_FLUSH_PAGE
+ bne 2b
+ sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE
+3:
+ cmp %o2, %o1
+ bne 1b
+ add %o2, -PAGE_SIZE, %o0
+ mov SRMMU_FAULT_STATUS, %g5
+ lda [%g5] ASI_M_MMUREGS, %g0
+ mov SRMMU_CTX_REG, %g7
+ sta %o3, [%g7] ASI_M_MMUREGS
+hypersparc_flush_cache_range_out:
+ retl
+ nop
+
+ /* HyperSparc requires a valid mapping where we are about to flush
+ * in order to check for a physical tag match during the flush.
+ */
+ /* Verified, my ass... */
+hypersparc_flush_cache_page:
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ ld [%o0 + AOFF_mm_context], %g2
+#ifndef CONFIG_SMP
+ cmp %g2, -1
+ be hypersparc_flush_cache_page_out
+#endif
+ WINDOW_FLUSH(%g4, %g5)
+
+ sethi %hi(vac_line_size), %g1
+ ld [%g1 + %lo(vac_line_size)], %o4
+ mov SRMMU_CTX_REG, %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+ lda [%o3] ASI_M_MMUREGS, %o2
+ sta %g2, [%o3] ASI_M_MMUREGS
+ or %o1, 0x400, %o5
+ lda [%o5] ASI_M_FLUSH_PROBE, %g1
+ orcc %g0, %g1, %g0
+ be 2f
+ add %o4, %o4, %o5
+ sub %o1, -PAGE_SIZE, %o1
+ add %o4, %o5, %g1
+ add %o4, %g1, %g2
+ add %o4, %g2, %g3
+ add %o4, %g3, %g4
+ add %o4, %g4, %g5
+ add %o4, %g5, %g7
+
+ /* BLAMMO! */
+1:
+ sub %o1, %g7, %o1
+ sta %g0, [%o1 + %g0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g1] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+ andcc %o1, 0xffc, %g0
+ sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+ bne 1b
+ sta %g0, [%o1 + %g5] ASI_M_FLUSH_PAGE
+2:
+ mov SRMMU_FAULT_STATUS, %g7
+ mov SRMMU_CTX_REG, %g4
+ lda [%g7] ASI_M_MMUREGS, %g0
+ sta %o2, [%g4] ASI_M_MMUREGS
+hypersparc_flush_cache_page_out:
+ retl
+ nop
+
+hypersparc_flush_sig_insns:
+ flush %o1
+ retl
+ flush %o1 + 4
+
+ /* HyperSparc is copy-back. */
+hypersparc_flush_page_to_ram:
+ sethi %hi(vac_line_size), %g1
+ ld [%g1 + %lo(vac_line_size)], %o4
+ andn %o0, (PAGE_SIZE - 1), %o0
+ add %o4, %o4, %o5
+ or %o0, 0x400, %g7
+ lda [%g7] ASI_M_FLUSH_PROBE, %g5
+ add %o4, %o5, %g1
+ orcc %g5, 0, %g0
+ be 2f
+ add %o4, %g1, %g2
+ add %o4, %g2, %g3
+ sub %o0, -PAGE_SIZE, %o0
+ add %o4, %g3, %g4
+ add %o4, %g4, %g5
+ add %o4, %g5, %g7
+
+ /* BLAMMO! */
+1:
+ sub %o0, %g7, %o0
+ sta %g0, [%o0 + %g0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o0 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o0 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o0 + %g1] ASI_M_FLUSH_PAGE
+ sta %g0, [%o0 + %g2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o0 + %g3] ASI_M_FLUSH_PAGE
+ andcc %o0, 0xffc, %g0
+ sta %g0, [%o0 + %g4] ASI_M_FLUSH_PAGE
+ bne 1b
+ sta %g0, [%o0 + %g5] ASI_M_FLUSH_PAGE
+2:
+ mov SRMMU_FAULT_STATUS, %g1
+ retl
+ lda [%g1] ASI_M_MMUREGS, %g0
+
+ /* HyperSparc is IO cache coherent. */
+hypersparc_flush_page_for_dma:
+ retl
+ nop
+
+ /* It was noted that at boot time a TLB flush all in a delay slot
+ * can deliver an illegal instruction to the processor if the timing
+ * is just right...
+ */
+hypersparc_flush_tlb_all:
+ mov 0x400, %g1
+ sta %g0, [%g1] ASI_M_FLUSH_PROBE
+ retl
+ nop
+
+hypersparc_flush_tlb_mm:
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o1
+ lda [%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+ cmp %o1, -1
+ be hypersparc_flush_tlb_mm_out
+#endif
+ mov 0x300, %g2
+ sta %o1, [%g1] ASI_M_MMUREGS
+ sta %g0, [%g2] ASI_M_FLUSH_PROBE
+hypersparc_flush_tlb_mm_out:
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+hypersparc_flush_tlb_range:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+ cmp %o3, -1
+ be hypersparc_flush_tlb_range_out
+#endif
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sta %o3, [%g1] ASI_M_MMUREGS
+ and %o1, %o4, %o1
+ add %o1, 0x200, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+1:
+ sub %o1, %o4, %o1
+ cmp %o1, %o2
+ blu,a 1b
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+hypersparc_flush_tlb_range_out:
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+hypersparc_flush_tlb_page:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+#ifndef CONFIG_SMP
+ cmp %o3, -1
+ be hypersparc_flush_tlb_page_out
+#endif
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sta %o3, [%g1] ASI_M_MMUREGS
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+hypersparc_flush_tlb_page_out:
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ __INIT
+
+ /* High speed page clear/copy. */
+hypersparc_bzero_1page:
+/* NOTE: This routine has to be shorter than 40insns --jj */
+ clr %g1
+ mov 32, %g2
+ mov 64, %g3
+ mov 96, %g4
+ mov 128, %g5
+ mov 160, %g7
+ mov 192, %o2
+ mov 224, %o3
+ mov 16, %o1
+1:
+ stda %g0, [%o0 + %g0] ASI_M_BFILL
+ stda %g0, [%o0 + %g2] ASI_M_BFILL
+ stda %g0, [%o0 + %g3] ASI_M_BFILL
+ stda %g0, [%o0 + %g4] ASI_M_BFILL
+ stda %g0, [%o0 + %g5] ASI_M_BFILL
+ stda %g0, [%o0 + %g7] ASI_M_BFILL
+ stda %g0, [%o0 + %o2] ASI_M_BFILL
+ stda %g0, [%o0 + %o3] ASI_M_BFILL
+ subcc %o1, 1, %o1
+ bne 1b
+ add %o0, 256, %o0
+
+ retl
+ nop
+
+hypersparc_copy_1page:
+/* NOTE: This routine has to be shorter than 70insns --jj */
+ sub %o1, %o0, %o2 ! difference
+ mov 16, %g1
+1:
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ add %o0, 32, %o0
+ sta %o0, [%o0 + %o2] ASI_M_BCOPY
+ subcc %g1, 1, %g1
+ bne 1b
+ add %o0, 32, %o0
+
+ retl
+ nop
+
+ .globl hypersparc_setup_blockops
+hypersparc_setup_blockops:
+ sethi %hi(bzero_1page), %o0
+ or %o0, %lo(bzero_1page), %o0
+ sethi %hi(hypersparc_bzero_1page), %o1
+ or %o1, %lo(hypersparc_bzero_1page), %o1
+ sethi %hi(hypersparc_copy_1page), %o2
+ or %o2, %lo(hypersparc_copy_1page), %o2
+ ld [%o1], %o4
+1:
+ add %o1, 4, %o1
+ st %o4, [%o0]
+ add %o0, 4, %o0
+ cmp %o1, %o2
+ bne 1b
+ ld [%o1], %o4
+ sethi %hi(__copy_1page), %o0
+ or %o0, %lo(__copy_1page), %o0
+ sethi %hi(hypersparc_setup_blockops), %o2
+ or %o2, %lo(hypersparc_setup_blockops), %o2
+ ld [%o1], %o4
+1:
+ add %o1, 4, %o1
+ st %o4, [%o0]
+ add %o0, 4, %o0
+ cmp %o1, %o2
+ bne 1b
+ ld [%o1], %o4
+ sta %g0, [%g0] ASI_M_FLUSH_IWHOLE
+ retl
+ nop
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
new file mode 100644
index 000000000000..a2dea69b2f07
--- /dev/null
+++ b/arch/sparc/mm/init.c
@@ -0,0 +1,515 @@
+/* $Id: init.c,v 1.103 2001/11/19 19:03:08 davem Exp $
+ * linux/arch/sparc/mm/init.c
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/initrd.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/vac-ops.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
+#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
+#include <asm/tlb.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+unsigned long *sparc_valid_addr_bitmap;
+
+unsigned long phys_base;
+unsigned long pfn_base;
+
+unsigned long page_kernel;
+
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
+unsigned long sparc_unmapped_base;
+
+struct pgtable_cache_struct pgt_quicklists;
+
+/* References to section boundaries */
+extern char __init_begin, __init_end, _start, _end, etext , edata;
+
+/* Initial ramdisk setup */
+extern unsigned int sparc_ramdisk_image;
+extern unsigned int sparc_ramdisk_size;
+
+unsigned long highstart_pfn, highend_pfn;
+
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+
+void __init kmap_init(void)
+{
+ /* cache the first kmap pte */
+ kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
+ kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
+}
+
+void show_mem(void)
+{
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n",
+ nr_swap_pages << (PAGE_SHIFT-10));
+ printk("%ld pages of RAM\n", totalram_pages);
+ printk("%d free pages\n", nr_free_pages());
+#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
+#ifndef CONFIG_SMP
+ if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
+ printk("%ld entries in page dir cache\n",pgd_cache_size);
+#endif
+#endif
+}
+
+void __init sparc_context_init(int numctx)
+{
+ int ctx;
+
+ ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);
+
+ for(ctx = 0; ctx < numctx; ctx++) {
+ struct ctx_list *clist;
+
+ clist = (ctx_list_pool + ctx);
+ clist->ctx_number = ctx;
+ clist->ctx_mm = NULL;
+ }
+ ctx_free.next = ctx_free.prev = &ctx_free;
+ ctx_used.next = ctx_used.prev = &ctx_used;
+ for(ctx = 0; ctx < numctx; ctx++)
+ add_to_free_ctxlist(ctx_list_pool + ctx);
+}
+
+extern unsigned long cmdline_memory_size;
+unsigned long last_valid_pfn;
+
+unsigned long calc_highpages(void)
+{
+ int i;
+ int nr = 0;
+
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+ unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+
+ if (end_pfn <= max_low_pfn)
+ continue;
+
+ if (start_pfn < max_low_pfn)
+ start_pfn = max_low_pfn;
+
+ nr += end_pfn - start_pfn;
+ }
+
+ return nr;
+}
+
+unsigned long calc_max_low_pfn(void)
+{
+ int i;
+ unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
+ unsigned long curr_pfn, last_pfn;
+
+ last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
+ for (i = 1; sp_banks[i].num_bytes != 0; i++) {
+ curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+
+ if (curr_pfn >= tmp) {
+ if (last_pfn < tmp)
+ tmp = last_pfn;
+ break;
+ }
+
+ last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+ }
+
+ return tmp;
+}
+
+unsigned long __init bootmem_init(unsigned long *pages_avail)
+{
+ unsigned long bootmap_size, start_pfn;
+ unsigned long end_of_phys_memory = 0UL;
+ unsigned long bootmap_pfn, bytes_avail, size;
+ int i;
+
+ bytes_avail = 0UL;
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ end_of_phys_memory = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ bytes_avail += sp_banks[i].num_bytes;
+ if (cmdline_memory_size) {
+ if (bytes_avail > cmdline_memory_size) {
+ unsigned long slack = bytes_avail - cmdline_memory_size;
+
+ bytes_avail -= slack;
+ end_of_phys_memory -= slack;
+
+ sp_banks[i].num_bytes -= slack;
+ if (sp_banks[i].num_bytes == 0) {
+ sp_banks[i].base_addr = 0xdeadbeef;
+ } else {
+ sp_banks[i+1].num_bytes = 0;
+ sp_banks[i+1].base_addr = 0xdeadbeef;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Start with page aligned address of last symbol in kernel
+ * image.
+ */
+ start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
+
+ /* Now shift down to get the real physical page frame number. */
+ start_pfn >>= PAGE_SHIFT;
+
+ bootmap_pfn = start_pfn;
+
+ max_pfn = end_of_phys_memory >> PAGE_SHIFT;
+
+ max_low_pfn = max_pfn;
+ highstart_pfn = highend_pfn = max_pfn;
+
+ if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
+ highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
+ max_low_pfn = calc_max_low_pfn();
+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+ calc_highpages() >> (20 - PAGE_SHIFT));
+ }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
+ if (sparc_ramdisk_image) {
+ if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
+ sparc_ramdisk_image -= KERNBASE;
+ initrd_start = sparc_ramdisk_image + phys_base;
+ initrd_end = initrd_start + sparc_ramdisk_size;
+ if (initrd_end > end_of_phys_memory) {
+ printk(KERN_CRIT "initrd extends beyond end of memory "
+ "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+ initrd_end, end_of_phys_memory);
+ initrd_start = 0;
+ }
+ if (initrd_start) {
+ if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
+ initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
+ bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
+ }
+ }
+#endif
+ /* Initialize the boot-time allocator. */
+ bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base,
+ max_low_pfn);
+
+ /* Now register the available physical memory with the
+ * allocator.
+ */
+ *pages_avail = 0;
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ unsigned long curr_pfn, last_pfn;
+
+ curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+ if (curr_pfn >= max_low_pfn)
+ break;
+
+ last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+ /*
+ * .. finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = (last_pfn - curr_pfn) << PAGE_SHIFT;
+ *pages_avail += last_pfn - curr_pfn;
+
+ free_bootmem(sp_banks[i].base_addr, size);
+ }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ /* Reserve the initrd image area. */
+ size = initrd_end - initrd_start;
+ reserve_bootmem(initrd_start, size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
+ initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
+ }
+#endif
+ /* Reserve the kernel text/data/bss. */
+ size = (start_pfn << PAGE_SHIFT) - phys_base;
+ reserve_bootmem(phys_base, size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ /* Reserve the bootmem map. We do not account for it
+ * in pages_avail because we will release that memory
+ * in free_all_bootmem.
+ */
+ size = bootmap_size;
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ return max_pfn;
+}
+
+/*
+ * check_pgt_cache
+ *
+ * This is called at the end of unmapping of VMA (zap_page_range),
+ * to rescan the page cache for architecture specific things,
+ * presumably something like sun4/sun4c PMEGs. Most architectures
+ * define check_pgt_cache empty.
+ *
+ * We simply copy the 2.4 implementation for now.
+ */
+int pgt_cache_water[2] = { 25, 50 };
+
+void check_pgt_cache(void)
+{
+ do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
+}
+
+/*
+ * paging_init() sets up the page tables: We call the MMU specific
+ * init routine based upon the Sun model type on the Sparc.
+ *
+ */
+extern void sun4c_paging_init(void);
+extern void srmmu_paging_init(void);
+extern void device_scan(void);
+
+void __init paging_init(void)
+{
+ switch(sparc_cpu_model) {
+ case sun4c:
+ case sun4e:
+ case sun4:
+ sun4c_paging_init();
+ sparc_unmapped_base = 0xe0000000;
+ BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
+ break;
+ case sun4m:
+ case sun4d:
+ srmmu_paging_init();
+ sparc_unmapped_base = 0x50000000;
+ BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
+ break;
+ default:
+ prom_printf("paging_init: Cannot init paging on this Sparc\n");
+ prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
+ prom_printf("paging_init: Halting...\n");
+ prom_halt();
+ };
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+ btfixup();
+ device_scan();
+}
+
+struct cache_palias *sparc_aliases;
+
+static void __init taint_real_pages(void)
+{
+ int i;
+
+ for (i = 0; sp_banks[i].num_bytes; i++) {
+ unsigned long start, end;
+
+ start = sp_banks[i].base_addr;
+ end = start + sp_banks[i].num_bytes;
+
+ while (start < end) {
+ set_bit(start >> 20, sparc_valid_addr_bitmap);
+ start += PAGE_SIZE;
+ }
+ }
+}
+
+void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long tmp;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
+#endif
+
+ for (tmp = start_pfn; tmp < end_pfn; tmp++) {
+ struct page *page = pfn_to_page(tmp);
+
+ ClearPageReserved(page);
+ set_bit(PG_highmem, &page->flags);
+ set_page_count(page, 1);
+ __free_page(page);
+ totalhigh_pages++;
+ }
+}
+
+void __init mem_init(void)
+{
+ int codepages = 0;
+ int datapages = 0;
+ int initpages = 0;
+ int reservedpages = 0;
+ int i;
+
+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+ prom_printf("BUG: fixmap and pkmap areas overlap\n");
+ prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
+ PKMAP_BASE,
+ (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+ FIXADDR_START);
+ prom_printf("Please mail sparclinux@vger.kernel.org.\n");
+ prom_halt();
+ }
+
+
+ /* Saves us work later. */
+ memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+
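+ /* sparc_valid_addr_bitmap keeps one bit per megabyte of physical
+ * address space, rounded up to whole 32-bit words; hence the
+ * extra shift by 5 here and the i << 2 byte count below.
+ */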
+ i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
+ i += 1;
+ sparc_valid_addr_bitmap = (unsigned long *)
+ __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+
+ if (sparc_valid_addr_bitmap == NULL) {
+ prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
+ prom_halt();
+ }
+ memset(sparc_valid_addr_bitmap, 0, i << 2);
+
+ taint_real_pages();
+
+ max_mapnr = last_valid_pfn - pfn_base;
+ high_memory = __va(max_low_pfn << PAGE_SHIFT);
+
+ totalram_pages = free_all_bootmem();
+
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+ unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+
+ num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;
+
+ if (end_pfn <= highstart_pfn)
+ continue;
+
+ if (start_pfn < highstart_pfn)
+ start_pfn = highstart_pfn;
+
+ map_high_region(start_pfn, end_pfn);
+ }
+
+ totalram_pages += totalhigh_pages;
+
+ codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
+ codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
+ datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
+ datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
+ initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
+ initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
+
+ /* Ignore memory holes for the purpose of counting reserved pages */
+ for (i=0; i < max_low_pfn; i++)
+ if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
+ && PageReserved(pfn_to_page(i)))
+ reservedpages++;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT - 10),
+ codepages << (PAGE_SHIFT-10),
+ reservedpages << (PAGE_SHIFT - 10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10),
+ totalhigh_pages << (PAGE_SHIFT-10));
+}
+
+void free_initmem (void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ struct page *p;
+
+ p = virt_to_page(addr);
+
+ ClearPageReserved(p);
+ set_page_count(p, 1);
+ __free_page(p);
+ totalram_pages++;
+ num_physpages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ struct page *p = virt_to_page(start);
+
+ ClearPageReserved(p);
+ set_page_count(p, 1);
+ __free_page(p);
+ num_physpages++;
+ }
+}
+#endif
+
+void sparc_flush_page_to_ram(struct page *page)
+{
+ unsigned long vaddr = (unsigned long)page_address(page);
+
+ if (vaddr)
+ __flush_page_to_ram(vaddr);
+}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
new file mode 100644
index 000000000000..eefffa1dc5de
--- /dev/null
+++ b/arch/sparc/mm/io-unit.c
@@ -0,0 +1,318 @@
+/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
+ * io-unit.c: IO-UNIT specific routines for memory management.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
+#include <linux/bitops.h>
+
+#include <asm/scatterlist.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sbus.h>
+#include <asm/io.h>
+#include <asm/io-unit.h>
+#include <asm/mxcc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/dma.h>
+
+/* #define IOUNIT_DEBUG */
+#ifdef IOUNIT_DEBUG
+#define IOD(x) printk(x)
+#else
+#define IOD(x) do { } while (0)
+#endif
+
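+/* An IO-UNIT pte stores the physical address shifted right by 4,
+ * so that the full (up to 36-bit) physical address fits in 32 bits.
+ */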
+#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
+#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
+
+void __init
+iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
+{
+ iopte_t *xpt, *xptend;
+ struct iounit_struct *iounit;
+ struct linux_prom_registers iommu_promregs[PROMREG_MAX];
+ struct resource r;
+
+ iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
+
+ memset(iounit, 0, sizeof(*iounit));
+ iounit->limit[0] = IOUNIT_BMAP1_START;
+ iounit->limit[1] = IOUNIT_BMAP2_START;
+ iounit->limit[2] = IOUNIT_BMAPM_START;
+ iounit->limit[3] = IOUNIT_BMAPM_END;
+ iounit->rotor[1] = IOUNIT_BMAP2_START;
+ iounit->rotor[2] = IOUNIT_BMAPM_START;
+
+ xpt = NULL;
+ if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
+ sizeof(iommu_promregs)) != -1) {
+ prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
+ memset(&r, 0, sizeof(r));
+ r.flags = iommu_promregs[2].which_io;
+ r.start = iommu_promregs[2].phys_addr;
+ xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
+ }
+ if(!xpt) panic("Cannot map External Page Table.");
+
+ sbus->iommu = (struct iommu_struct *)iounit;
+ iounit->page_table = xpt;
+
+ for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
+ xpt < xptend;)
+ iopte_val(*xpt++) = 0;
+}
+
+/* One has to hold iounit->lock to call this */
+static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
+{
+ int i, j, k, npages;
+ unsigned long rotor, scan, limit;
+ iopte_t iopte;
+
+ npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+ /* A tiny bit of magic ingredient :) */
+ switch (npages) {
+ case 1: i = 0x0231; break;
+ case 2: i = 0x0132; break;
+ default: i = 0x0213; break;
+ }
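+ /* Each hex digit of i names a bitmap region to try, lowest
+ * digit first (it indexes rotor[]/limit[]); when the next
+ * digit is 0 we have run out of regions and panic below.
+ */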
+
+ IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
+
+next: j = (i & 15);
+ rotor = iounit->rotor[j - 1];
+ limit = iounit->limit[j];
+ scan = rotor;
+nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
+ if (scan + npages > limit) {
+ if (limit != rotor) {
+ limit = rotor;
+ scan = iounit->limit[j - 1];
+ goto nexti;
+ }
+ i >>= 4;
+ if (!(i & 15))
+ panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
+ goto next;
+ }
+ for (k = 1, scan++; k < npages; k++)
+ if (test_bit(scan++, iounit->bmap))
+ goto nexti;
+ iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
+ scan -= npages;
+ iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
+ vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
+ for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
+ set_bit(scan, iounit->bmap);
+ iounit->page_table[scan] = iopte;
+ }
+ IOD(("%08lx\n", vaddr));
+ return vaddr;
+}
+
+static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
+{
+ unsigned long ret, flags;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ spin_lock_irqsave(&iounit->lock, flags);
+ ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
+ spin_unlock_irqrestore(&iounit->lock, flags);
+ return ret;
+}
+
+static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ unsigned long flags;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ /* FIXME: Cache some resolved pages - often several sg entries refer to the same page */
+ spin_lock_irqsave(&iounit->lock, flags);
+ while (sz != 0) {
+ --sz;
+ sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
+ sg[sz].dvma_length = sg[sz].length;
+ }
+ spin_unlock_irqrestore(&iounit->lock, flags);
+}
+
+static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
+{
+ unsigned long flags;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ spin_lock_irqsave(&iounit->lock, flags);
+ len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+ for (len += vaddr; vaddr < len; vaddr++)
+ clear_bit(vaddr, iounit->bmap);
+ spin_unlock_irqrestore(&iounit->lock, flags);
+}
+
+static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ unsigned long flags;
+ unsigned long vaddr, len;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ spin_lock_irqsave(&iounit->lock, flags);
+ while (sz != 0) {
+ --sz;
+ len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+ for (len += vaddr; vaddr < len; vaddr++)
+ clear_bit(vaddr, iounit->bmap);
+ }
+ spin_unlock_irqrestore(&iounit->lock, flags);
+}
+
+#ifdef CONFIG_SBUS
+static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
+{
+ unsigned long page, end;
+ pgprot_t dvma_prot;
+ iopte_t *iopte;
+ struct sbus_bus *sbus;
+
+ *pba = addr;
+
+ dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
+ end = PAGE_ALIGN((addr + len));
+ while(addr < end) {
+ page = va;
+ {
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ long i;
+
+ pgdp = pgd_offset(&init_mm, addr);
+ pmdp = pmd_offset(pgdp, addr);
+ ptep = pte_offset_map(pmdp, addr);
+
+ set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
+
+ i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
+
+ for_each_sbus(sbus) {
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ iopte = (iopte_t *)(iounit->page_table + i);
+ *iopte = MKIOPTE(__pa(page));
+ }
+ }
+ addr += PAGE_SIZE;
+ va += PAGE_SIZE;
+ }
+ flush_cache_all();
+ flush_tlb_all();
+
+ return 0;
+}
+
+static void iounit_unmap_dma_area(unsigned long addr, int len)
+{
+ /* XXX Somebody please fill this in */
+}
+
+/* XXX We do not pass sbus device here, bad. */
+static struct page *iounit_translate_dvma(unsigned long addr)
+{
+ struct sbus_bus *sbus = sbus_root; /* They are all the same */
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+ int i;
+ iopte_t *iopte;
+
+ i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
+ iopte = (iopte_t *)(iounit->page_table + i);
+ return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
+}
+#endif
+
+static char *iounit_lockarea(char *vaddr, unsigned long len)
+{
+/* FIXME: Write this */
+ return vaddr;
+}
+
+static void iounit_unlockarea(char *vaddr, unsigned long len)
+{
+/* FIXME: Write this */
+}
+
+void __init ld_mmu_iounit(void)
+{
+ BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
+
+#ifdef CONFIG_SBUS
+ BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
+#endif
+}
+
+__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
+{
+ int i, j, k, npages;
+ unsigned long rotor, scan, limit;
+ unsigned long flags;
+ __u32 ret;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
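+ /* Nibble-encoded region search order, as in iounit_get_area(). */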
+ i = 0x0213;
+ spin_lock_irqsave(&iounit->lock, flags);
+next: j = (i & 15);
+ rotor = iounit->rotor[j - 1];
+ limit = iounit->limit[j];
+ scan = rotor;
+nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
+ if (scan + npages > limit) {
+ if (limit != rotor) {
+ limit = rotor;
+ scan = iounit->limit[j - 1];
+ goto nexti;
+ }
+ i >>= 4;
+ if (!(i & 15))
+ panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
+ goto next;
+ }
+ for (k = 1, scan++; k < npages; k++)
+ if (test_bit(scan++, iounit->bmap))
+ goto nexti;
+ iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
+ scan -= npages;
+ ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
+ for (k = 0; k < npages; k++, scan++)
+ set_bit(scan, iounit->bmap);
+ spin_unlock_irqrestore(&iounit->lock, flags);
+ return ret;
+}
+
+__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
+{
+ int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+ iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
+ return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
+}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
new file mode 100644
index 000000000000..489bf68d5f05
--- /dev/null
+++ b/arch/sparc/mm/iommu.c
@@ -0,0 +1,475 @@
+/*
+ * iommu.c: IOMMU specific routines for memory management.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
+
+#include <asm/scatterlist.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sbus.h>
+#include <asm/io.h>
+#include <asm/mxcc.h>
+#include <asm/mbus.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/bitext.h>
+#include <asm/iommu.h>
+#include <asm/dma.h>
+
+/*
+ * This can be sized dynamically, but we will do this
+ * only when we have guidance about actual I/O pressures.
+ */
+#define IOMMU_RNGE IOMMU_RNGE_256MB
+#define IOMMU_START 0xF0000000
+#define IOMMU_WINSIZE (256*1024*1024U)
+#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
+#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
+
+/* srmmu.c */
+extern int viking_mxcc_present;
+BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
+#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
+extern int flush_page_for_dma_global;
+static int viking_flush;
+/* viking.S */
+extern void viking_flush_page(unsigned long page);
+extern void viking_mxcc_flush_page(unsigned long page);
+
+/*
+ * Values precomputed according to CPU type.
+ */
+static unsigned int ioperm_noc; /* Consistent mapping iopte flags */
+static pgprot_t dvma_prot; /* Consistent mapping pte flags */
+
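+/* An iopte holds the physical page number in its upper bits: with 4 KB
+ * pages, (pfn << 8) is the physical address shifted right by 4, the same
+ * encoding the SRMMU itself uses for ptes.
+ */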
+#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
+#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
+
+void __init
+iommu_init(int iommund, struct sbus_bus *sbus)
+{
+ unsigned int impl, vers;
+ unsigned long tmp;
+ struct iommu_struct *iommu;
+ struct linux_prom_registers iommu_promregs[PROMREG_MAX];
+ struct resource r;
+ unsigned long *bitmap;
+
+ iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("Unable to allocate iommu structure\n");
+ prom_halt();
+ }
+ iommu->regs = NULL;
+ if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
+ sizeof(iommu_promregs)) != -1) {
+ memset(&r, 0, sizeof(r));
+ r.flags = iommu_promregs[0].which_io;
+ r.start = iommu_promregs[0].phys_addr;
+ iommu->regs = (struct iommu_regs *)
+ sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
+ }
+ if (!iommu->regs) {
+ prom_printf("Cannot map IOMMU registers\n");
+ prom_halt();
+ }
+ impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
+ vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
+ tmp = iommu->regs->control;
+ tmp &= ~(IOMMU_CTRL_RNGE);
+ tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
+ iommu->regs->control = tmp;
+ iommu_invalidate(iommu->regs);
+ iommu->start = IOMMU_START;
+ iommu->end = 0xffffffff;
+
+ /* Allocate IOMMU page table */
+ /* Stupid alignment constraints give me a headache.
+ We need a 256K, 512K, 1M or 2M area aligned to
+ its size, and the current gfp allocator fortunately
+ gives it to us. */
+ tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
+ if (!tmp) {
+ prom_printf("Unable to allocate iommu table [0x%08x]\n",
+ IOMMU_NPTES*sizeof(iopte_t));
+ prom_halt();
+ }
+ iommu->page_table = (iopte_t *)tmp;
+
+ /* Initialize new table. */
+ memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
+ flush_cache_all();
+ flush_tlb_all();
+ iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
+ iommu_invalidate(iommu->regs);
+
+ bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
+ if (!bitmap) {
+ prom_printf("Unable to allocate iommu bitmap [%d]\n",
+ (int)(IOMMU_NPTES>>3));
+ prom_halt();
+ }
+ bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
+ /* To be coherent on HyperSparc, the page color of DVMA
+ * and physical addresses must match.
+ */
+ if (srmmu_modtype == HyperSparc)
+ iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
+ else
+ iommu->usemap.num_colors = 1;
+
+ printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
+ impl, vers, iommu->page_table,
+ (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
+
+ sbus->iommu = iommu;
+}
+
+/* This begs to be btfixup-ed by srmmu. */
+/* Flush the iotlb entries to ram. */
+/* This could be better if we didn't have to flush whole pages. */
+static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
+{
+ unsigned long start;
+ unsigned long end;
+
+ start = (unsigned long)iopte & PAGE_MASK;
+ end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
+ if (viking_mxcc_present) {
+ while(start < end) {
+ viking_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if (viking_flush) {
+ while(start < end) {
+ viking_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ } else {
+ while(start < end) {
+ __flush_page_to_ram(start);
+ start += PAGE_SIZE;
+ }
+ }
+}
+
+static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
+{
+ struct iommu_struct *iommu = sbus->iommu;
+ int ioptex;
+ iopte_t *iopte, *iopte0;
+ unsigned int busa, busa0;
+ int i;
+
+ /* page color = pfn of page */
+ ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+ if (ioptex < 0)
+ panic("iommu out");
+ busa0 = iommu->start + (ioptex << PAGE_SHIFT);
+ iopte0 = &iommu->page_table[ioptex];
+
+ busa = busa0;
+ iopte = iopte0;
+ for (i = 0; i < npages; i++) {
+ iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+ iommu_invalidate_page(iommu->regs, busa);
+ busa += PAGE_SIZE;
+ iopte++;
+ page++;
+ }
+
+ iommu_flush_iotlb(iopte0, npages);
+
+ return busa0;
+}
+
+static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
+ struct sbus_bus *sbus)
+{
+ unsigned long off;
+ int npages;
+ struct page *page;
+ u32 busa;
+
+ off = (unsigned long)vaddr & ~PAGE_MASK;
+ npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+ page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
+ busa = iommu_get_one(page, npages, sbus);
+ return busa + off;
+}
+
+static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
+{
+ return iommu_get_scsi_one(vaddr, len, sbus);
+}
+
+static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
+{
+ flush_page_for_dma(0);
+ return iommu_get_scsi_one(vaddr, len, sbus);
+}
+
+static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
+{
+ unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+
+ while(page < ((unsigned long)(vaddr + len))) {
+ flush_page_for_dma(page);
+ page += PAGE_SIZE;
+ }
+ return iommu_get_scsi_one(vaddr, len, sbus);
+}
+
+static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ int n;
+
+ while (sz != 0) {
+ --sz;
+ n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+ sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_length = (__u32) sg->length;
+ sg++;
+ }
+}
+
+static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ int n;
+
+ flush_page_for_dma(0);
+ while (sz != 0) {
+ --sz;
+ n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+ sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_length = (__u32) sg->length;
+ sg++;
+ }
+}
+
+static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ unsigned long page, oldpage = 0;
+ int n, i;
+
+ while(sz != 0) {
+ --sz;
+
+ n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+ /*
+ * We expect unmapped highmem pages not to be in the cache.
+ * XXX Is this a good assumption?
+ * XXX What if someone else unmaps it here and races us?
+ */
+ if ((page = (unsigned long) page_address(sg->page)) != 0) {
+ for (i = 0; i < n; i++) {
+ if (page != oldpage) { /* Already flushed? */
+ flush_page_for_dma(page);
+ oldpage = page;
+ }
+ page += PAGE_SIZE;
+ }
+ }
+
+ sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_length = (__u32) sg->length;
+ sg++;
+ }
+}
+
+static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
+{
+ struct iommu_struct *iommu = sbus->iommu;
+ int ioptex;
+ int i;
+
+ if (busa < iommu->start)
+ BUG();
+ ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ iopte_val(iommu->page_table[ioptex + i]) = 0;
+ iommu_invalidate_page(iommu->regs, busa);
+ busa += PAGE_SIZE;
+ }
+ bit_map_clear(&iommu->usemap, ioptex, npages);
+}
+
+static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
+{
+ unsigned long off;
+ int npages;
+
+ off = vaddr & ~PAGE_MASK;
+ npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+ iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
+}
+
+static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ int n;
+
+ while(sz != 0) {
+ --sz;
+
+ n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+ iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
+ sg->dvma_address = 0x21212121;
+ sg++;
+ }
+}
+
+#ifdef CONFIG_SBUS
+static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
+ unsigned long addr, int len)
+{
+ unsigned long page, end;
+ struct iommu_struct *iommu = sbus_root->iommu;
+ iopte_t *iopte = iommu->page_table;
+ iopte_t *first;
+ int ioptex;
+
+ if ((va & ~PAGE_MASK) != 0) BUG();
+ if ((addr & ~PAGE_MASK) != 0) BUG();
+ if ((len & ~PAGE_MASK) != 0) BUG();
+
+ /* page color = physical address */
+ ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
+ addr >> PAGE_SHIFT);
+ if (ioptex < 0)
+ panic("iommu out");
+
+ iopte += ioptex;
+ first = iopte;
+ end = addr + len;
+ while(addr < end) {
+ page = va;
+ {
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (viking_mxcc_present)
+ viking_mxcc_flush_page(page);
+ else if (viking_flush)
+ viking_flush_page(page);
+ else
+ __flush_page_to_ram(page);
+
+ pgdp = pgd_offset(&init_mm, addr);
+ pmdp = pmd_offset(pgdp, addr);
+ ptep = pte_offset_map(pmdp, addr);
+
+ set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
+ }
+ iopte_val(*iopte++) =
+ MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
+ addr += PAGE_SIZE;
+ va += PAGE_SIZE;
+ }
+ /* P3: why do we need this?
+ *
+ * DAVEM: Because there are several aspects, none of which
+ * are handled by a single interface. Some cpus are
+ * completely not I/O DMA coherent, and some have
+ * virtually indexed caches. The driver DMA flushing
+ * methods handle the former case, but here, during
+ * IOMMU page table modifications and use of non-cacheable
+ * cpu mappings of pages potentially still in the cpu caches,
+ * we have to handle the latter case as well.
+ */
+ flush_cache_all();
+ iommu_flush_iotlb(first, len >> PAGE_SHIFT);
+ flush_tlb_all();
+ iommu_invalidate(iommu->regs);
+
+ *pba = iommu->start + (ioptex << PAGE_SHIFT);
+ return 0;
+}
+
+static void iommu_unmap_dma_area(unsigned long busa, int len)
+{
+ struct iommu_struct *iommu = sbus_root->iommu;
+ iopte_t *iopte = iommu->page_table;
+ unsigned long end;
+ int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+
+ if ((busa & ~PAGE_MASK) != 0) BUG();
+ if ((len & ~PAGE_MASK) != 0) BUG();
+
+ iopte += ioptex;
+ end = busa + len;
+ while (busa < end) {
+ iopte_val(*iopte++) = 0;
+ busa += PAGE_SIZE;
+ }
+ flush_tlb_all();
+ iommu_invalidate(iommu->regs);
+ bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
+}
+
+static struct page *iommu_translate_dvma(unsigned long busa)
+{
+ struct iommu_struct *iommu = sbus_root->iommu;
+ iopte_t *iopte = iommu->page_table;
+
+ iopte += ((busa - iommu->start) >> PAGE_SHIFT);
+ return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
+}
+#endif
+
+static char *iommu_lockarea(char *vaddr, unsigned long len)
+{
+ return vaddr;
+}
+
+static void iommu_unlockarea(char *vaddr, unsigned long len)
+{
+}
+
+void __init ld_mmu_iommu(void)
+{
+ viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
+ BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
+
+ if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
+ /* IO coherent chip */
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
+ } else if (flush_page_for_dma_global) {
+ /* flush_page_for_dma flushes everything, regardless of which page it is */
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
+ } else {
+ BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
+ }
+ BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);
+
+#ifdef CONFIG_SBUS
+ BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
+#endif
+
+ if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
+ dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
+ ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
+ } else {
+ dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
+ ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
+ }
+}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
new file mode 100644
index 000000000000..e9f9571601ba
--- /dev/null
+++ b/arch/sparc/mm/loadmmu.c
@@ -0,0 +1,46 @@
+/* $Id: loadmmu.c,v 1.56 2000/02/08 20:24:21 davem Exp $
+ * loadmmu.c: This code loads up all the mm function pointers once the
+ * machine type has been determined. It also sets the static
+ * mmu values such as PAGE_NONE, etc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/a.out.h>
+#include <asm/mmu_context.h>
+#include <asm/oplib.h>
+
+struct ctx_list *ctx_list_pool;
+struct ctx_list ctx_free;
+struct ctx_list ctx_used;
+
+unsigned int pg_iobits;
+
+extern void ld_mmu_sun4c(void);
+extern void ld_mmu_srmmu(void);
+
+void __init load_mmu(void)
+{
+ switch(sparc_cpu_model) {
+ case sun4c:
+ case sun4:
+ ld_mmu_sun4c();
+ break;
+ case sun4m:
+ case sun4d:
+ ld_mmu_srmmu();
+ break;
+ default:
+ prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
+ prom_halt();
+ }
+ btfixup();
+}
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c
new file mode 100644
index 000000000000..9e215659697e
--- /dev/null
+++ b/arch/sparc/mm/nosrmmu.c
@@ -0,0 +1,59 @@
+/* $Id: nosrmmu.c,v 1.5 1999/11/19 04:11:54 davem Exp $
+ * nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
+ * so that it does not need srmmu, avoiding ifdefs.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/mbus.h>
+#include <asm/sbus.h>
+
+static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n";
+
+enum mbus_module srmmu_modtype;
+void *srmmu_nocache_pool;
+
+int vac_cache_size = 0;
+
+static void __init should_not_happen(void)
+{
+ prom_printf(shouldnothappen);
+ prom_halt();
+}
+
+void __init srmmu_frob_mem_map(unsigned long start_mem)
+{
+ should_not_happen();
+}
+
+unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+ should_not_happen();
+ return 0;
+}
+
+void __init ld_mmu_srmmu(void)
+{
+ should_not_happen();
+}
+
+void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+}
+
+void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+}
+
+__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
+{
+ return 0;
+}
+
+__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
+{
+ return 0;
+}
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
new file mode 100644
index 000000000000..ea2e2105341d
--- /dev/null
+++ b/arch/sparc/mm/nosun4c.c
@@ -0,0 +1,77 @@
+/* $Id: nosun4c.c,v 1.3 2000/02/14 04:52:36 jj Exp $
+ * nosun4c.c: This file is a bunch of dummies for SMP compiles,
+ * so that it does not need sun4c, avoiding ifdefs.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/pgtable.h>
+
+static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
+
+/* Dummies */
+struct sun4c_mmu_ring {
+ unsigned long xxx1[3];
+ unsigned char xxx2[2];
+ int xxx3;
+};
+struct sun4c_mmu_ring sun4c_kernel_ring;
+struct sun4c_mmu_ring sun4c_kfree_ring;
+unsigned long sun4c_kernel_faults;
+unsigned long *sun4c_memerr_reg;
+
+static void __init should_not_happen(void)
+{
+ prom_printf(shouldnothappen);
+ prom_halt();
+}
+
+unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+ should_not_happen();
+ return 0;
+}
+
+void __init ld_mmu_sun4c(void)
+{
+ should_not_happen();
+}
+
+void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+}
+
+void sun4c_unmapioaddr(unsigned long virt_addr)
+{
+}
+
+void sun4c_complete_all_stores(void)
+{
+}
+
+pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
+{
+ return NULL;
+}
+
+pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
+{
+ return NULL;
+}
+
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __init sun4c_probe_vac(void)
+{
+ should_not_happen();
+}
+
+void __init sun4c_probe_memerr_reg(void)
+{
+ should_not_happen();
+}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
new file mode 100644
index 000000000000..c89a803cbc20
--- /dev/null
+++ b/arch/sparc/mm/srmmu.c
@@ -0,0 +1,2274 @@
+/*
+ * srmmu.c: SRMMU specific routines for memory management.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/bitext.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/kdebug.h>
+#include <asm/vaddrs.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/mbus.h>
+#include <asm/cache.h>
+#include <asm/oplib.h>
+#include <asm/sbus.h>
+#include <asm/asi.h>
+#include <asm/msi.h>
+#include <asm/a.out.h>
+#include <asm/mmu_context.h>
+#include <asm/io-unit.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+/* Now the cpu specific definitions. */
+#include <asm/viking.h>
+#include <asm/mxcc.h>
+#include <asm/ross.h>
+#include <asm/tsunami.h>
+#include <asm/swift.h>
+#include <asm/turbosparc.h>
+
+#include <asm/btfixup.h>
+
+enum mbus_module srmmu_modtype;
+unsigned int hwbug_bitmask;
+int vac_cache_size;
+int vac_line_size;
+
+extern struct resource sparc_iomap;
+
+extern unsigned long last_valid_pfn;
+
+extern unsigned long page_kernel;
+
+pgd_t *srmmu_swapper_pg_dir;
+
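+/* On uniprocessor kernels the cache/TLB flush bodies are skipped for an
+ * mm that never received an MMU context; on SMP they always run.
+ */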
+#ifdef CONFIG_SMP
+#define FLUSH_BEGIN(mm)
+#define FLUSH_END
+#else
+#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
+#define FLUSH_END }
+#endif
+
+BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
+#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
+
+int flush_page_for_dma_global = 1;
+
+#ifdef CONFIG_SMP
+BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
+#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
+#endif
+
+char *srmmu_name;
+
+ctxd_t *srmmu_ctx_table_phys;
+ctxd_t *srmmu_context_table;
+
+int viking_mxcc_present;
+static DEFINE_SPINLOCK(srmmu_context_spinlock);
+
+int is_hypersparc;
+
+/*
+ * In general all page table modifications should use the V8 atomic
+ * swap instruction. This ensures the mmu and the cpu are in sync
+ * with respect to ref/mod bits in the page tables.
+ */
+static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
+{
+ __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
+ return value;
+}
+
+static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
+{
+ srmmu_swap((unsigned long *)ptep, pte_val(pteval));
+}
+
+/* The very generic SRMMU page table operations. */
+static inline int srmmu_device_memory(unsigned long x)
+{
+ return ((x & 0xF0000000) != 0);
+}
+
+int srmmu_cache_pagetables;
+
+/* these will be initialized in srmmu_nocache_calcsize() */
+unsigned long srmmu_nocache_size;
+unsigned long srmmu_nocache_end;
+
+/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
+#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
+
+/* The context table is a nocache user with the biggest alignment needs. */
+#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
+
+void *srmmu_nocache_pool;
+void *srmmu_nocache_bitmap;
+static struct bit_map srmmu_nocache_map;
+
+static unsigned long srmmu_pte_pfn(pte_t pte)
+{
+ if (srmmu_device_memory(pte_val(pte))) {
+ /* Just return something that will cause
+ * pfn_valid() to return false. This makes
+ * copy_one_pte() just copy the PTE
+ * over directly.
+ */
+ return ~0UL;
+ }
+ return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
+}
+
+static struct page *srmmu_pmd_page(pmd_t pmd)
+{
+
+ if (srmmu_device_memory(pmd_val(pmd)))
+ BUG();
+ return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
+}
+
+static inline unsigned long srmmu_pgd_page(pgd_t pgd)
+{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+
+
+static inline int srmmu_pte_none(pte_t pte)
+{ return !(pte_val(pte) & 0xFFFFFFF); }
+
+static inline int srmmu_pte_present(pte_t pte)
+{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
+
+static inline int srmmu_pte_read(pte_t pte)
+{ return !(pte_val(pte) & SRMMU_NOREAD); }
+
+static inline void srmmu_pte_clear(pte_t *ptep)
+{ srmmu_set_pte(ptep, __pte(0)); }
+
+static inline int srmmu_pmd_none(pmd_t pmd)
+{ return !(pmd_val(pmd) & 0xFFFFFFF); }
+
+static inline int srmmu_pmd_bad(pmd_t pmd)
+{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
+
+static inline int srmmu_pmd_present(pmd_t pmd)
+{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
+
+static inline void srmmu_pmd_clear(pmd_t *pmdp) {
+ int i;
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
+ srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+}
+
+static inline int srmmu_pgd_none(pgd_t pgd)
+{ return !(pgd_val(pgd) & 0xFFFFFFF); }
+
+static inline int srmmu_pgd_bad(pgd_t pgd)
+{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
+
+static inline int srmmu_pgd_present(pgd_t pgd)
+{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
+
+static inline void srmmu_pgd_clear(pgd_t * pgdp)
+{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
+
+static inline pte_t srmmu_pte_wrprotect(pte_t pte)
+{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
+
+static inline pte_t srmmu_pte_mkclean(pte_t pte)
+{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
+
+static inline pte_t srmmu_pte_mkold(pte_t pte)
+{ return __pte(pte_val(pte) & ~SRMMU_REF);}
+
+static inline pte_t srmmu_pte_mkwrite(pte_t pte)
+{ return __pte(pte_val(pte) | SRMMU_WRITE);}
+
+static inline pte_t srmmu_pte_mkdirty(pte_t pte)
+{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
+
+static inline pte_t srmmu_pte_mkyoung(pte_t pte)
+{ return __pte(pte_val(pte) | SRMMU_REF);}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
+{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
+
+static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
+{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
+
+static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
+
+/* XXX should we hyper_flush_whole_icache here - Anton */
+static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
+{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+
+static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
+
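+/* One software pmd covers PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware
+ * page tables: set one hardware PTD per SRMMU_REAL_PTRS_PER_PTE-entry
+ * chunk of the pte table.
+ */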
+static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ unsigned long ptp; /* Physical address, shifted right by 4 */
+ int i;
+
+ ptp = __nocache_pa((unsigned long) ptep) >> 4;
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
+ srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+ ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+ }
+}
+
+static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
+{
+ unsigned long ptp; /* Physical address, shifted right by 4 */
+ int i;
+
+ ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
+ srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+ ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+ }
+}
+
+static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
+{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
+
+/* to find an entry in a top-level page table... */
+extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) srmmu_pgd_page(*dir) +
+ ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+}
+
+/* Find an entry in the third-level page table.. */
+static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+{
+ void *pte;
+
+ pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
+ return (pte_t *) pte +
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+}
+
+static unsigned long srmmu_swp_type(swp_entry_t entry)
+{
+ return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
+}
+
+static unsigned long srmmu_swp_offset(swp_entry_t entry)
+{
+ return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
+}
+
+static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
+{
+ return (swp_entry_t) {
+ (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
+ | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
+}
+
+/*
+ * size: bytes to allocate in the nocache area.
+ * align: bytes, number to align at.
+ * Returns the virtual address of the allocated area.
+ */
+static unsigned long __srmmu_get_nocache(int size, int align)
+{
+ int offset;
+
+ if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+ printk("Size 0x%x too small for nocache request\n", size);
+ size = SRMMU_NOCACHE_BITMAP_SHIFT;
+ }
+ if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
+ printk("Size 0x%x unaligned int nocache request\n", size);
+ size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
+ }
+ BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
+
+ offset = bit_map_string_get(&srmmu_nocache_map,
+ size >> SRMMU_NOCACHE_BITMAP_SHIFT,
+ align >> SRMMU_NOCACHE_BITMAP_SHIFT);
+ if (offset == -1) {
+ printk("srmmu: out of nocache %d: %d/%d\n",
+ size, (int) srmmu_nocache_size,
+ srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
+ return 0;
+ }
+
+ return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
+}
+
+inline unsigned long srmmu_get_nocache(int size, int align)
+{
+ unsigned long tmp;
+
+ tmp = __srmmu_get_nocache(size, align);
+
+ if (tmp)
+ memset((void *)tmp, 0, size);
+
+ return tmp;
+}
+
+void srmmu_free_nocache(unsigned long vaddr, int size)
+{
+ int offset;
+
+ if (vaddr < SRMMU_NOCACHE_VADDR) {
+ printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
+ vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
+ BUG();
+ }
+ if (vaddr+size > srmmu_nocache_end) {
+ printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
+ vaddr, srmmu_nocache_end);
+ BUG();
+ }
+ if (size & (size-1)) {
+ printk("Size 0x%x is not a power of 2\n", size);
+ BUG();
+ }
+ if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+ printk("Size 0x%x is too small\n", size);
+ BUG();
+ }
+ if (vaddr & (size-1)) {
+ printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
+ BUG();
+ }
+
+ offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
+ size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+ bit_map_clear(&srmmu_nocache_map, offset, size);
+}
+
+void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
+
+extern unsigned long probe_memory(void); /* in fault.c */
+
+/*
+ * Reserve nocache dynamically proportionally to the amount of
+ * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
+ */
+void srmmu_nocache_calcsize(void)
+{
+ unsigned long sysmemavail = probe_memory() / 1024;
+ int srmmu_nocache_npages;
+
+ srmmu_nocache_npages =
+ sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
+
+ /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
+ // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
+ if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
+ srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
+
+ /* anything above 1280 blows up */
+ if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
+ srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
+
+ srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
+ srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
+}
+
+void srmmu_nocache_init(void)
+{
+ unsigned int bitmap_bits;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long paddr, vaddr;
+ unsigned long pteval;
+
+ bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+ srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
+ SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+ memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
+
+ srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+ bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
+
+ srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+ memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
+ init_mm.pgd = srmmu_swapper_pg_dir;
+
+ srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
+
+ paddr = __pa((unsigned long)srmmu_nocache_pool);
+ vaddr = SRMMU_NOCACHE_VADDR;
+
+ while (vaddr < srmmu_nocache_end) {
+ pgd = pgd_offset_k(vaddr);
+ pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
+ pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);
+
+ pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
+
+ if (srmmu_cache_pagetables)
+ pteval |= SRMMU_CACHE;
+
+ srmmu_set_pte(__nocache_fix(pte), __pte(pteval));
+
+ vaddr += PAGE_SIZE;
+ paddr += PAGE_SIZE;
+ }
+
+ flush_cache_all();
+ flush_tlb_all();
+}
+
+static inline pgd_t *srmmu_get_pgd_fast(void)
+{
+ pgd_t *pgd = NULL;
+
+ pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+ if (pgd) {
+ pgd_t *init = pgd_offset_k(0);
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+
+ return pgd;
+}
+
+static void srmmu_free_pgd_fast(pgd_t *pgd)
+{
+ srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
+}
+
+static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+}
+
+static void srmmu_pmd_free(pmd_t * pmd)
+{
+ srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
+}
+
+/*
+ * The hardware only needs alignment to 256, but we align to a whole
+ * page to reduce fragmentation problems caused by the buddy allocator.
+ * XXX Provide actual fragmentation statistics in /proc.
+ *
+ * Alignments up to the page size are the same for physical and virtual
+ * addresses of the nocache area.
+ */
+static pte_t *
+srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+}
+
+static struct page *
+srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ unsigned long pte;
+
+ if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
+ return NULL;
+ return pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+}
+
+static void srmmu_free_pte_fast(pte_t *pte)
+{
+ srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
+}
+
+static void srmmu_pte_free(struct page *pte)
+{
+ unsigned long p;
+
+ p = (unsigned long)page_address(pte); /* Cached address (for test) */
+ if (p == 0)
+ BUG();
+ p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
+ p = (unsigned long) __nocache_va(p); /* Nocached virtual */
+ srmmu_free_nocache(p, PTE_SIZE);
+}
+
+/*
+ */
+static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
+{
+ struct ctx_list *ctxp;
+
+ ctxp = ctx_free.next;
+ if(ctxp != &ctx_free) {
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ mm->context = ctxp->ctx_number;
+ ctxp->ctx_mm = mm;
+ return;
+ }
+ ctxp = ctx_used.next;
+ if(ctxp->ctx_mm == old_mm)
+ ctxp = ctxp->next;
+ if(ctxp == &ctx_used)
+ panic("out of mmu contexts");
+ flush_cache_mm(ctxp->ctx_mm);
+ flush_tlb_mm(ctxp->ctx_mm);
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ ctxp->ctx_mm->context = NO_CONTEXT;
+ ctxp->ctx_mm = mm;
+ mm->context = ctxp->ctx_number;
+}
+
+static inline void free_context(int context)
+{
+ struct ctx_list *ctx_old;
+
+ ctx_old = ctx_list_pool + context;
+ remove_from_ctx_list(ctx_old);
+ add_to_free_ctxlist(ctx_old);
+}
+
+
+static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+ struct task_struct *tsk, int cpu)
+{
+ if(mm->context == NO_CONTEXT) {
+ spin_lock(&srmmu_context_spinlock);
+ alloc_context(old_mm, mm);
+ spin_unlock(&srmmu_context_spinlock);
+ srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+ }
+
+ if (is_hypersparc)
+ hyper_flush_whole_icache();
+
+ srmmu_set_context(mm->context);
+}
+
+/* Low level IO area allocation on the SRMMU. */
+static inline void srmmu_mapioaddr(unsigned long physaddr,
+ unsigned long virt_addr, int bus_type)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long tmp;
+
+ physaddr &= PAGE_MASK;
+ pgdp = pgd_offset_k(virt_addr);
+ pmdp = srmmu_pmd_offset(pgdp, virt_addr);
+ ptep = srmmu_pte_offset(pmdp, virt_addr);
+ tmp = (physaddr >> 4) | SRMMU_ET_PTE;
+
+ /*
+ * I need to test whether this is consistent over all
+ * sun4m's. The bus_type represents the upper 4 bits of
+ * 36-bit physical address on the I/O space lines...
+ */
+ tmp |= (bus_type << 28);
+ tmp |= SRMMU_PRIV;
+ __flush_page_to_ram(virt_addr);
+ srmmu_set_pte(ptep, __pte(tmp));
+}
+
+static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
+ unsigned long xva, unsigned int len)
+{
+ while (len != 0) {
+ len -= PAGE_SIZE;
+ srmmu_mapioaddr(xpa, xva, bus);
+ xva += PAGE_SIZE;
+ xpa += PAGE_SIZE;
+ }
+ flush_tlb_all();
+}
+
+static inline void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_k(virt_addr);
+ pmdp = srmmu_pmd_offset(pgdp, virt_addr);
+ ptep = srmmu_pte_offset(pmdp, virt_addr);
+
+ /* No need to flush uncacheable page. */
+ srmmu_pte_clear(ptep);
+}
+
+static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
+{
+ while (len != 0) {
+ len -= PAGE_SIZE;
+ srmmu_unmapioaddr(virt_addr);
+ virt_addr += PAGE_SIZE;
+ }
+ flush_tlb_all();
+}
+
+/*
+ * On the SRMMU we do not have the problems with limited tlb entries
+ * for mapping kernel pages, so we just take things from the free page
+ * pool. As a side effect we are putting a little too much pressure
+ * on the gfp() subsystem. This setup also makes the logic of the
+ * iommu mapping code a lot easier as we can transparently handle
+ * mappings on the kernel stack without any special code, as we
+ * needed to on the sun4c.
+ */
+struct thread_info *srmmu_alloc_thread_info(void)
+{
+ struct thread_info *ret;
+
+ ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
+ THREAD_INFO_ORDER);
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ if (ret)
+ memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
+#endif /* DEBUG_STACK_USAGE */
+
+ return ret;
+}
+
+static void srmmu_free_thread_info(struct thread_info *ti)
+{
+ free_pages((unsigned long)ti, THREAD_INFO_ORDER);
+}
+
+/* tsunami.S */
+extern void tsunami_flush_cache_all(void);
+extern void tsunami_flush_cache_mm(struct mm_struct *mm);
+extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void tsunami_flush_page_to_ram(unsigned long page);
+extern void tsunami_flush_page_for_dma(unsigned long page);
+extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void tsunami_flush_tlb_all(void);
+extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
+extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void tsunami_setup_blockops(void);
+
+/*
+ * Workaround, until we find what's going on with Swift. When low on memory,
+ * it sometimes loops in fault/handle_mm_fault (incl. flush_tlb_page), finds
+ * out the page is already in the page tables, and faults again on the same
+ * instruction.
+ * I really don't understand it, have checked it and contexts
+ * are right, flush_tlb_all is done as well, and it faults again...
+ * Strange. -jj
+ *
+ * The following code is deadwood that may become necessary when
+ * we start to make precise page flushes again. --zaitcev
+ */
+static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+#if 0
+ static unsigned long last;
+ unsigned int val;
+ /* unsigned int n; */
+
+ if (address == last) {
+ val = srmmu_hwprobe(address);
+ if (val != 0 && pte_val(pte) != val) {
+ printk("swift_update_mmu_cache: "
+ "addr %lx put %08x probed %08x from %p\n",
+ address, pte_val(pte), val,
+ __builtin_return_address(0));
+ srmmu_flush_whole_tlb();
+ }
+ }
+ last = address;
+#endif
+}
+
+/* swift.S */
+extern void swift_flush_cache_all(void);
+extern void swift_flush_cache_mm(struct mm_struct *mm);
+extern void swift_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void swift_flush_page_to_ram(unsigned long page);
+extern void swift_flush_page_for_dma(unsigned long page);
+extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void swift_flush_tlb_all(void);
+extern void swift_flush_tlb_mm(struct mm_struct *mm);
+extern void swift_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+
+#if 0 /* P3: deadwood to debug precise flushes on Swift. */
+void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cctx, ctx1;
+
+ page &= PAGE_MASK;
+ if ((ctx1 = vma->vm_mm->context) != -1) {
+ cctx = srmmu_get_context();
+/* Is context # ever different from current context? P3 */
+ if (cctx != ctx1) {
+ printk("flush ctx %02x curr %02x\n", ctx1, cctx);
+ srmmu_set_context(ctx1);
+ swift_flush_page(page);
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (page), "i" (ASI_M_FLUSH_PROBE));
+ srmmu_set_context(cctx);
+ } else {
+ /* Rm. prot. bits from virt. c. */
+ /* swift_flush_cache_all(); */
+ /* swift_flush_cache_page(vma, page); */
+ swift_flush_page(page);
+
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (page), "i" (ASI_M_FLUSH_PROBE));
+ /* same as above: srmmu_flush_tlb_page() */
+ }
+ }
+}
+#endif
+
+/*
+ * The following are all MBUS based SRMMU modules, and therefore could
+ * be found in a multiprocessor configuration. On the whole, these
+ * chips seem to be much more touchy about DVMA and page tables
+ * with respect to cache coherency.
+ */
+
+/* Cypress flushes. */
+static void cypress_flush_cache_all(void)
+{
+ volatile unsigned long cypress_sucks;
+ unsigned long faddr, tagval;
+
+ flush_user_windows();
+ for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
+ __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
+ "=r" (tagval) :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+
+ /* If modified and valid, kick it. */
+ if((tagval & 0x60) == 0x60)
+ cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
+ }
+}
+
+static void cypress_flush_cache_mm(struct mm_struct *mm)
+{
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long flags, faddr;
+ int octx;
+
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ local_irq_save(flags);
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60;
+ d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+
+ faddr = (0x10000 - 0x100);
+ goto inside;
+ do {
+ faddr -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (faddr), "i" (ASI_M_FLUSH_CTX),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(faddr);
+ srmmu_set_context(octx);
+ local_irq_restore(flags);
+ FLUSH_END
+}
+
+static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long flags, faddr;
+ int octx;
+
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ local_irq_save(flags);
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60;
+ d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+
+ start &= SRMMU_REAL_PMD_MASK;
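+	/* Flush one SRMMU segment (SRMMU_REAL_PMD_SIZE) at a time through
+	 * the segment-flush ASI, again in 256-byte strides.
+	 */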
+ while(start < end) {
+ faddr = (start + (0x10000 - 0x100));
+ goto inside;
+ do {
+ faddr -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (faddr),
+ "i" (ASI_M_FLUSH_SEG),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while (faddr != start);
+ start += SRMMU_REAL_PMD_SIZE;
+ }
+ srmmu_set_context(octx);
+ local_irq_restore(flags);
+ FLUSH_END
+}
+
+static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+ register unsigned long a, b, c, d, e, f, g;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long flags, line;
+ int octx;
+
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ local_irq_save(flags);
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60;
+ d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
+ srmmu_set_context(octx);
+ local_irq_restore(flags);
+ FLUSH_END
+}
+
+/* Cypress is copy-back, at least that is how we configure it. */
+static void cypress_flush_page_to_ram(unsigned long page)
+{
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long line;
+
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
+}
+
+/* Cypress is also IO cache coherent. */
+static void cypress_flush_page_for_dma(unsigned long page)
+{
+}
+
+/* Cypress has a unified VIPT L2 cache in which both instructions and data
+ * are stored. It does not have an onboard icache of any sort, so
+ * no flush is necessary.
+ */
+static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+}
+
+static void cypress_flush_tlb_all(void)
+{
+ srmmu_flush_whole_tlb();
+}
+
+static void cypress_flush_tlb_mm(struct mm_struct *mm)
+{
+ FLUSH_BEGIN(mm)
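+	/* Save the current context, switch to mm's context, flush its TLB
+	 * entries through the flush/probe ASI, then restore the old context.
+	 */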
+ __asm__ __volatile__(
+ "lda [%0] %3, %%g5\n\t"
+ "sta %2, [%0] %3\n\t"
+ "sta %%g0, [%1] %4\n\t"
+ "sta %%g5, [%0] %3\n"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
+ "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
+ : "g5");
+ FLUSH_END
+}
+
+static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long size;
+
+ FLUSH_BEGIN(mm)
+ start &= SRMMU_PGDIR_MASK;
+ size = SRMMU_PGDIR_ALIGN(end) - start;
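+	/* One flush/probe store per SRMMU_PGDIR_SIZE chunk of the range,
+	 * with mm's context temporarily installed in the context register.
+	 */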
+ __asm__ __volatile__(
+ "lda [%0] %5, %%g5\n\t"
+ "sta %1, [%0] %5\n"
+ "1:\n\t"
+ "subcc %3, %4, %3\n\t"
+ "bne 1b\n\t"
+ " sta %%g0, [%2 + %3] %6\n\t"
+ "sta %%g5, [%0] %5\n"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
+ "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
+ "i" (ASI_M_FLUSH_PROBE)
+ : "g5", "cc");
+ FLUSH_END
+}
+
+static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ FLUSH_BEGIN(mm)
+ __asm__ __volatile__(
+ "lda [%0] %3, %%g5\n\t"
+ "sta %1, [%0] %3\n\t"
+ "sta %%g0, [%2] %4\n\t"
+ "sta %%g5, [%0] %3\n"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
+ "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
+ : "g5");
+ FLUSH_END
+}
+
+/* viking.S */
+extern void viking_flush_cache_all(void);
+extern void viking_flush_cache_mm(struct mm_struct *mm);
+extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void viking_flush_page_to_ram(unsigned long page);
+extern void viking_flush_page_for_dma(unsigned long page);
+extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
+extern void viking_flush_page(unsigned long page);
+extern void viking_mxcc_flush_page(unsigned long page);
+extern void viking_flush_tlb_all(void);
+extern void viking_flush_tlb_mm(struct mm_struct *mm);
+extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void viking_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+extern void sun4dsmp_flush_tlb_all(void);
+extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
+extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+
+/* hypersparc.S */
+extern void hypersparc_flush_cache_all(void);
+extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
+extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void hypersparc_flush_page_to_ram(unsigned long page);
+extern void hypersparc_flush_page_for_dma(unsigned long page);
+extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void hypersparc_flush_tlb_all(void);
+extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
+extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void hypersparc_setup_blockops(void);
+
+/*
+ * NOTE: All of this startup code assumes the low 16mb (approx.) of
+ * kernel mappings are done with one single contiguous chunk of
+ * ram. On small ram machines (classics mainly) we only get
+ * around 8mb mapped for us.
+ */
+
+void __init early_pgtable_allocfail(char *type)
+{
+ prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
+ prom_halt();
+}
+
+void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
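+	/* Allocate any missing pmd/pte tables for [start, end) from the
+	 * nocache pool, touching them only through __nocache_fix().
+	 */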
+ while(start < end) {
+ pgdp = pgd_offset_k(start);
+ if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+ pmdp = (pmd_t *) __srmmu_get_nocache(
+ SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ if (pmdp == NULL)
+ early_pgtable_allocfail("pmd");
+ memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
+ srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+ }
+ pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+ if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+ if (ptep == NULL)
+ early_pgtable_allocfail("pte");
+ memset(__nocache_fix(ptep), 0, PTE_SIZE);
+ srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+ }
+ if (start > (0xffffffffUL - PMD_SIZE))
+ break;
+ start = (start + PMD_SIZE) & PMD_MASK;
+ }
+}
+
+void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
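+	/* Same as the early variant above, except that the tables are
+	 * addressed directly, with no __nocache_fix() wrapping.
+	 */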
+ while(start < end) {
+ pgdp = pgd_offset_k(start);
+ if(srmmu_pgd_none(*pgdp)) {
+ pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ if (pmdp == NULL)
+ early_pgtable_allocfail("pmd");
+ memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
+ srmmu_pgd_set(pgdp, pmdp);
+ }
+ pmdp = srmmu_pmd_offset(pgdp, start);
+ if(srmmu_pmd_none(*pmdp)) {
+ ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
+ PTE_SIZE);
+ if (ptep == NULL)
+ early_pgtable_allocfail("pte");
+ memset(ptep, 0, PTE_SIZE);
+ srmmu_pmd_set(pmdp, ptep);
+ }
+ if (start > (0xffffffffUL - PMD_SIZE))
+ break;
+ start = (start + PMD_SIZE) & PMD_MASK;
+ }
+}
+
+/*
+ * This is much cleaner than poking around physical address space
+ * looking at the prom's page table directly, which is what most
+ * other OSes do. Yuck... this is much better.
+ */
+void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
+ unsigned long prompte;
+
+ while(start <= end) {
+ if (start == 0)
+ break; /* probably wrap around */
+ if(start == 0xfef00000)
+ start = KADB_DEBUGGER_BEGVM;
+ if(!(prompte = srmmu_hwprobe(start))) {
+ start += PAGE_SIZE;
+ continue;
+ }
+
+ /* A red snapper, see what it really is. */
+ what = 0;
+
+ if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
+ if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+ what = 1;
+ }
+
+ if(!(start & ~(SRMMU_PGDIR_MASK))) {
+ if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
+ prompte)
+ what = 2;
+ }
+
+ pgdp = pgd_offset_k(start);
+ if(what == 2) {
+ *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
+ start += SRMMU_PGDIR_SIZE;
+ continue;
+ }
+ if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+ pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ if (pmdp == NULL)
+ early_pgtable_allocfail("pmd");
+ memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
+ srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+ }
+ pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+ if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
+ PTE_SIZE);
+ if (ptep == NULL)
+ early_pgtable_allocfail("pte");
+ memset(__nocache_fix(ptep), 0, PTE_SIZE);
+ srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+ }
+ if(what == 1) {
+ /*
+ * We bend the rule where all 16 PTPs in a pmd_t point
+ * inside the same PTE page, and we leak a perfectly
+ * good hardware PTE piece. Alternatives seem worse.
+ */
+ unsigned int x; /* Index of HW PMD in soft cluster */
+ x = (start >> PMD_SHIFT) & 15;
+ *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
+ start += SRMMU_REAL_PMD_SIZE;
+ continue;
+ }
+ ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
+ *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
+ start += PAGE_SIZE;
+ }
+}
+
+#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
+
+/* Create a third-level SRMMU 16MB page mapping. */
+static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
+{
+ pgd_t *pgdp = pgd_offset_k(vaddr);
+ unsigned long big_pte;
+
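+	/* An SRMMU PTE holds the physical address shifted right by four;
+	 * writing it straight into the PGD slot maps the whole 16MB region
+	 * with a single entry.
+	 */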
+ big_pte = KERNEL_PTE(phys_base >> 4);
+ *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
+}
+
+/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
+static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
+{
+ unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
+ unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
+ unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
+ /* Map "low" memory only */
+ const unsigned long min_vaddr = PAGE_OFFSET;
+ const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
+
+ if (vstart < min_vaddr || vstart >= max_vaddr)
+ return vstart;
+
+ if (vend > max_vaddr || vend < min_vaddr)
+ vend = max_vaddr;
+
+ while(vstart < vend) {
+ do_large_mapping(vstart, pstart);
+ vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
+ }
+ return vstart;
+}
+
+static inline void memprobe_error(char *msg)
+{
+ prom_printf(msg);
+ prom_printf("Halting now...\n");
+ prom_halt();
+}
+
+static inline void map_kernel(void)
+{
+ int i;
+
+ if (phys_base > 0) {
+ do_large_mapping(PAGE_OFFSET, phys_base);
+ }
+
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
+ }
+
+ BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
+}
+
+/* Paging initialization on the Sparc Reference MMU. */
+extern void sparc_context_init(int);
+
+void (*poke_srmmu)(void) __initdata = NULL;
+
+extern unsigned long bootmem_init(unsigned long *pages_avail);
+
+void __init srmmu_paging_init(void)
+{
+ int i, cpunode;
+ char node_str[128];
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long pages_avail;
+
+ sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
+
+ if (sparc_cpu_model == sun4d)
+ num_contexts = 65536; /* We know it is Viking */
+ else {
+ /* Find the number of contexts on the srmmu. */
+ cpunode = prom_getchild(prom_root_node);
+ num_contexts = 0;
+ while(cpunode != 0) {
+ prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
+ if(!strcmp(node_str, "cpu")) {
+ num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
+ break;
+ }
+ cpunode = prom_getsibling(cpunode);
+ }
+ }
+
+ if(!num_contexts) {
+ prom_printf("Something wrong, can't find cpu node in paging_init.\n");
+ prom_halt();
+ }
+
+ pages_avail = 0;
+ last_valid_pfn = bootmem_init(&pages_avail);
+
+ srmmu_nocache_calcsize();
+ srmmu_nocache_init();
+ srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
+ map_kernel();
+
+ /* ctx table has to be physically aligned to its size */
+ srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
+ srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
+
+ for(i = 0; i < num_contexts; i++)
+ srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
+
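+	/* Flush, point the MMU at our new context table, flush the TLB,
+	 * then let the per-chip poke routine finish the hardware setup.
+	 */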
+ flush_cache_all();
+ srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
+ flush_tlb_all();
+ poke_srmmu();
+
+#ifdef CONFIG_SUN_IO
+ srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
+ srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
+#endif
+
+ srmmu_allocate_ptable_skeleton(
+ __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
+ srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
+
+ pgd = pgd_offset_k(PKMAP_BASE);
+ pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
+ pte = srmmu_pte_offset(pmd, PKMAP_BASE);
+ pkmap_page_table = pte;
+
+ flush_cache_all();
+ flush_tlb_all();
+
+ sparc_context_init(num_contexts);
+
+ kmap_init();
+
+ {
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long zholes_size[MAX_NR_ZONES];
+ unsigned long npages;
+ int znum;
+
+ for (znum = 0; znum < MAX_NR_ZONES; znum++)
+ zones_size[znum] = zholes_size[znum] = 0;
+
+ npages = max_low_pfn - pfn_base;
+
+ zones_size[ZONE_DMA] = npages;
+ zholes_size[ZONE_DMA] = npages - pages_avail;
+
+ npages = highend_pfn - max_low_pfn;
+ zones_size[ZONE_HIGHMEM] = npages;
+ zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
+
+ free_area_init_node(0, &contig_page_data, zones_size,
+ pfn_base, zholes_size);
+ }
+}
+
+static void srmmu_mmu_info(struct seq_file *m)
+{
+ seq_printf(m,
+ "MMU type\t: %s\n"
+ "contexts\t: %d\n"
+ "nocache total\t: %ld\n"
+ "nocache used\t: %d\n",
+ srmmu_name,
+ num_contexts,
+ srmmu_nocache_size,
+ srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
+}
+
+static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+}
+
+static void srmmu_destroy_context(struct mm_struct *mm)
+{
+
+ if(mm->context != NO_CONTEXT) {
+ flush_cache_mm(mm);
+ srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
+ flush_tlb_mm(mm);
+ spin_lock(&srmmu_context_spinlock);
+ free_context(mm->context);
+ spin_unlock(&srmmu_context_spinlock);
+ mm->context = NO_CONTEXT;
+ }
+}
+
+/* Init various srmmu chip types. */
+static void __init srmmu_is_bad(void)
+{
+ prom_printf("Could not determine SRMMU chip type.\n");
+ prom_halt();
+}
+
+static void __init init_vac_layout(void)
+{
+ int nd, cache_lines;
+ char node_str[128];
+#ifdef CONFIG_SMP
+ int cpu = 0;
+ unsigned long max_size = 0;
+ unsigned long min_line_size = 0x10000000;
+#endif
+
+ nd = prom_getchild(prom_root_node);
+ while((nd = prom_getsibling(nd)) != 0) {
+ prom_getstring(nd, "device_type", node_str, sizeof(node_str));
+ if(!strcmp(node_str, "cpu")) {
+ vac_line_size = prom_getint(nd, "cache-line-size");
+ if (vac_line_size == -1) {
+ prom_printf("can't determine cache-line-size, "
+ "halting.\n");
+ prom_halt();
+ }
+ cache_lines = prom_getint(nd, "cache-nlines");
+ if (cache_lines == -1) {
+ prom_printf("can't determine cache-nlines, halting.\n");
+ prom_halt();
+ }
+
+ vac_cache_size = cache_lines * vac_line_size;
+#ifdef CONFIG_SMP
+ if(vac_cache_size > max_size)
+ max_size = vac_cache_size;
+ if(vac_line_size < min_line_size)
+ min_line_size = vac_line_size;
+ cpu++;
+ if (cpu >= NR_CPUS || !cpu_online(cpu))
+ break;
+#else
+ break;
+#endif
+ }
+ }
+ if(nd == 0) {
+ prom_printf("No CPU nodes found, halting.\n");
+ prom_halt();
+ }
+#ifdef CONFIG_SMP
+ vac_cache_size = max_size;
+ vac_line_size = min_line_size;
+#endif
+ printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
+ (int)vac_cache_size, (int)vac_line_size);
+}
+
+static void __init poke_hypersparc(void)
+{
+ volatile unsigned long clear;
+ unsigned long mreg = srmmu_get_mmureg();
+
+ hyper_flush_unconditional_combined();
+
+ mreg &= ~(HYPERSPARC_CWENABLE);
+ mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
+ mreg |= (HYPERSPARC_CMODE);
+
+ srmmu_set_mmureg(mreg);
+
+#if 0 /* XXX I think this is bad news... -DaveM */
+ hyper_clear_all_tags();
+#endif
+
+ put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
+ hyper_flush_whole_icache();
+ clear = srmmu_get_faddr();
+ clear = srmmu_get_fstatus();
+}
+
+static void __init init_hypersparc(void)
+{
+ srmmu_name = "ROSS HyperSparc";
+ srmmu_modtype = HyperSparc;
+
+ init_vac_layout();
+
+ is_hypersparc = 1;
+
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
+
+
+ poke_srmmu = poke_hypersparc;
+
+ hypersparc_setup_blockops();
+}
+
+static void __init poke_cypress(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+ unsigned long faddr, tagval;
+ volatile unsigned long cypress_sucks;
+ volatile unsigned long clear;
+
+ clear = srmmu_get_faddr();
+ clear = srmmu_get_fstatus();
+
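+	/* If the cache is still off, just clear every tag; if it is already
+	 * on, push out any lines whose tags show them valid and modified.
+	 */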
+ if (!(mreg & CYPRESS_CENABLE)) {
+		for(faddr = 0x0; faddr < 0x10000; faddr += 0x20) {
+ __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
+ "sta %%g0, [%0] %2\n\t" : :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+ }
+ } else {
+ for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
+ __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
+ "=r" (tagval) :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+
+ /* If modified and valid, kick it. */
+ if((tagval & 0x60) == 0x60)
+ cypress_sucks = *(unsigned long *)
+ (0xf0020000 + faddr);
+ }
+ }
+
+ /* And one more, for our good neighbor, Mr. Broken Cypress. */
+ clear = srmmu_get_faddr();
+ clear = srmmu_get_fstatus();
+
+ mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
+ srmmu_set_mmureg(mreg);
+}
+
+static void __init init_cypress_common(void)
+{
+ init_vac_layout();
+
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
+
+
+ BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
+
+ poke_srmmu = poke_cypress;
+}
+
+static void __init init_cypress_604(void)
+{
+ srmmu_name = "ROSS Cypress-604(UP)";
+ srmmu_modtype = Cypress;
+ init_cypress_common();
+}
+
+static void __init init_cypress_605(unsigned long mrev)
+{
+ srmmu_name = "ROSS Cypress-605(MP)";
+ if(mrev == 0xe) {
+ srmmu_modtype = Cypress_vE;
+ hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
+ } else {
+ if(mrev == 0xd) {
+ srmmu_modtype = Cypress_vD;
+ hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
+ } else {
+ srmmu_modtype = Cypress;
+ }
+ }
+ init_cypress_common();
+}
+
+static void __init poke_swift(void)
+{
+ unsigned long mreg;
+
+ /* Clear any crap from the cache or else... */
+ swift_flush_cache_all();
+
+ /* Enable I & D caches */
+ mreg = srmmu_get_mmureg();
+ mreg |= (SWIFT_IE | SWIFT_DE);
+ /*
+ * The Swift branch folding logic is completely broken. At
+	 * trap time, if things are just right, it can mistakenly
+ * think that a trap is coming from kernel mode when in fact
+ * it is coming from user mode (it mis-executes the branch in
+ * the trap code). So you see things like crashme completely
+ * hosing your machine which is completely unacceptable. Turn
+ * this shit off... nice job Fujitsu.
+ */
+ mreg &= ~(SWIFT_BF);
+ srmmu_set_mmureg(mreg);
+}
+
+#define SWIFT_MASKID_ADDR 0x10003018
+static void __init init_swift(void)
+{
+ unsigned long swift_rev;
+
+ __asm__ __volatile__("lda [%1] %2, %0\n\t"
+ "srl %0, 0x18, %0\n\t" :
+ "=r" (swift_rev) :
+ "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
+ srmmu_name = "Fujitsu Swift";
+ switch(swift_rev) {
+ case 0x11:
+ case 0x20:
+ case 0x23:
+ case 0x30:
+ srmmu_modtype = Swift_lots_o_bugs;
+ hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
+ /*
+ * Gee george, I wonder why Sun is so hush hush about
+		 * this hardware bug... really braindamaged stuff going
+ * on here. However I think we can find a way to avoid
+ * all of the workaround overhead under Linux. Basically,
+ * any page fault can cause kernel pages to become user
+ * accessible (the mmu gets confused and clears some of
+ * the ACC bits in kernel ptes). Aha, sounds pretty
+ * horrible eh? But wait, after extensive testing it appears
+ * that if you use pgd_t level large kernel pte's (like the
+ * 4MB pages on the Pentium) the bug does not get tripped
+ * at all. This avoids almost all of the major overhead.
+		 * Welcome to a world where your vendor tells you to
+ * "apply this kernel patch" instead of "sorry for the
+ * broken hardware, send it back and we'll give you
+ * properly functioning parts"
+ */
+ break;
+ case 0x25:
+ case 0x31:
+ srmmu_modtype = Swift_bad_c;
+ hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
+ /*
+ * You see Sun allude to this hardware bug but never
+		 * admit things directly; they'll say things like
+ * "the Swift chip cache problems" or similar.
+ */
+ break;
+ default:
+ srmmu_modtype = Swift_ok;
+ break;
+ };
+
+ BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
+
+
+ BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
+
+ flush_page_for_dma_global = 0;
+
+ /*
+ * Are you now convinced that the Swift is one of the
+ * biggest VLSI abortions of all time? Bravo Fujitsu!
+ * Fujitsu, the !#?!%$'d up processor people. I bet if
+ * you examined the microcode of the Swift you'd find
+ * XXX's all over the place.
+ */
+ poke_srmmu = poke_swift;
+}
+
+static void turbosparc_flush_cache_all(void)
+{
+ flush_user_windows();
+ turbosparc_idflash_clear();
+}
+
+static void turbosparc_flush_cache_mm(struct mm_struct *mm)
+{
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ turbosparc_idflash_clear();
+ FLUSH_END
+}
+
+static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ FLUSH_BEGIN(vma->vm_mm)
+ flush_user_windows();
+ turbosparc_idflash_clear();
+ FLUSH_END
+}
+
+static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+ FLUSH_BEGIN(vma->vm_mm)
+ flush_user_windows();
+ if (vma->vm_flags & VM_EXEC)
+ turbosparc_flush_icache();
+ turbosparc_flush_dcache();
+ FLUSH_END
+}
+
+/* TurboSparc is copy-back, if we turn it on, but this does not work. */
+static void turbosparc_flush_page_to_ram(unsigned long page)
+{
+#ifdef TURBOSPARC_WRITEBACK
+ volatile unsigned long clear;
+
+ if (srmmu_hwprobe(page))
+ turbosparc_flush_page_cache(page);
+ clear = srmmu_get_fstatus();
+#endif
+}
+
+static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+}
+
+static void turbosparc_flush_page_for_dma(unsigned long page)
+{
+ turbosparc_flush_dcache();
+}
+
+static void turbosparc_flush_tlb_all(void)
+{
+ srmmu_flush_whole_tlb();
+}
+
+static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
+{
+ FLUSH_BEGIN(mm)
+ srmmu_flush_whole_tlb();
+ FLUSH_END
+}
+
+static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ FLUSH_BEGIN(vma->vm_mm)
+ srmmu_flush_whole_tlb();
+ FLUSH_END
+}
+
+static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ FLUSH_BEGIN(vma->vm_mm)
+ srmmu_flush_whole_tlb();
+ FLUSH_END
+}
+
+
+static void __init poke_turbosparc(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+ unsigned long ccreg;
+
+ /* Clear any crap from the cache or else... */
+ turbosparc_flush_cache_all();
+ mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
+ mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
+ srmmu_set_mmureg(mreg);
+
+ ccreg = turbosparc_get_ccreg();
+
+#ifdef TURBOSPARC_WRITEBACK
+ ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */
+ ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
+ /* Write-back D-cache, emulate VLSI
+ * abortion number three, not number one */
+#else
+ /* For now let's play safe, optimize later */
+ ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
+ /* Do DVMA snooping in Dcache, Write-thru D-cache */
+ ccreg &= ~(TURBOSPARC_uS2);
+ /* Emulate VLSI abortion number three, not number one */
+#endif
+
+ switch (ccreg & 7) {
+ case 0: /* No SE cache */
+ case 7: /* Test mode */
+ break;
+ default:
+ ccreg |= (TURBOSPARC_SCENABLE);
+ }
+ turbosparc_set_ccreg (ccreg);
+
+ mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
+ mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
+ srmmu_set_mmureg(mreg);
+}
+
+static void __init init_turbosparc(void)
+{
+ srmmu_name = "Fujitsu TurboSparc";
+ srmmu_modtype = TurboSparc;
+
+ BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
+
+ poke_srmmu = poke_turbosparc;
+}
+
+static void __init poke_tsunami(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+ mreg &= ~TSUNAMI_ITD;
+ mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
+ srmmu_set_mmureg(mreg);
+}
+
+static void __init init_tsunami(void)
+{
+ /*
+	 * Tsunami's pretty sane; Sun and TI actually got it
+ * somewhat right this time. Fujitsu should have
+ * taken some lessons from them.
+ */
+
+ srmmu_name = "TI Tsunami";
+ srmmu_modtype = Tsunami;
+
+ BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
+
+
+ BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
+
+ poke_srmmu = poke_tsunami;
+
+ tsunami_setup_blockops();
+}
+
+static void __init poke_viking(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+ static int smp_catch;
+
+ if(viking_mxcc_present) {
+ unsigned long mxcc_control = mxcc_get_creg();
+
+ mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
+ mxcc_control &= ~(MXCC_CTL_RRC);
+ mxcc_set_creg(mxcc_control);
+
+ /*
+ * We don't need memory parity checks.
+ * XXX This is a mess, have to dig out later. ecd.
+ viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
+ */
+
+ /* We do cache ptables on MXCC. */
+ mreg |= VIKING_TCENABLE;
+ } else {
+ unsigned long bpreg;
+
+ mreg &= ~(VIKING_TCENABLE);
+ if(smp_catch++) {
+ /* Must disable mixed-cmd mode here for other cpu's. */
+ bpreg = viking_get_bpreg();
+ bpreg &= ~(VIKING_ACTION_MIX);
+ viking_set_bpreg(bpreg);
+
+ /* Just in case PROM does something funny. */
+ msi_set_sync();
+ }
+ }
+
+ mreg |= VIKING_SPENABLE;
+ mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
+ mreg |= VIKING_SBENABLE;
+ mreg &= ~(VIKING_ACENABLE);
+ srmmu_set_mmureg(mreg);
+
+#ifdef CONFIG_SMP
+ /* Avoid unnecessary cross calls. */
+ BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
+ BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
+ BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
+ BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
+ BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
+ BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
+ BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+ btfixup();
+#endif
+}
+
+static void __init init_viking(void)
+{
+ unsigned long mreg = srmmu_get_mmureg();
+
+ /* Ahhh, the viking. SRMMU VLSI abortion number two... */
+ if(mreg & VIKING_MMODE) {
+ srmmu_name = "TI Viking";
+ viking_mxcc_present = 0;
+ msi_set_sync();
+
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
+
+ /*
+ * We need this to make sure old viking takes no hits
+		 * on its cache for DMA snoops, to work around the
+ * "load from non-cacheable memory" interrupt bug.
+ * This is only necessary because of the new way in
+ * which we use the IOMMU.
+ */
+ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
+
+ flush_page_for_dma_global = 0;
+ } else {
+ srmmu_name = "TI Viking/MXCC";
+ viking_mxcc_present = 1;
+
+ srmmu_cache_pagetables = 1;
+
+ /* MXCC vikings lack the DMA snooping bug. */
+ BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
+ }
+
+ BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
+
+#ifdef CONFIG_SMP
+ if (sparc_cpu_model == sun4d) {
+ BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
+ } else
+#endif
+ {
+ BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
+ }
+
+ BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
+
+ poke_srmmu = poke_viking;
+}
+
+/* Probe for the srmmu chip version. */
+static void __init get_srmmu_type(void)
+{
+ unsigned long mreg, psr;
+ unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
+
+ srmmu_modtype = SRMMU_INVAL_MOD;
+ hwbug_bitmask = 0;
+
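+	/* The module type and revision come from the MMU control register;
+	 * the PSR implementation/version fields identify the CPU itself.
+	 */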
+ mreg = srmmu_get_mmureg(); psr = get_psr();
+ mod_typ = (mreg & 0xf0000000) >> 28;
+ mod_rev = (mreg & 0x0f000000) >> 24;
+ psr_typ = (psr >> 28) & 0xf;
+ psr_vers = (psr >> 24) & 0xf;
+
+ /* First, check for HyperSparc or Cypress. */
+ if(mod_typ == 1) {
+ switch(mod_rev) {
+ case 7:
+ /* UP or MP Hypersparc */
+ init_hypersparc();
+ break;
+ case 0:
+ case 2:
+ /* Uniprocessor Cypress */
+ init_cypress_604();
+ break;
+ case 10:
+ case 11:
+ case 12:
+ /* _REALLY OLD_ Cypress MP chips... */
+ case 13:
+ case 14:
+ case 15:
+ /* MP Cypress mmu/cache-controller */
+ init_cypress_605(mod_rev);
+ break;
+ default:
+ /* Some other Cypress revision, assume a 605. */
+ init_cypress_605(mod_rev);
+ break;
+ };
+ return;
+ }
+
+ /*
+ * Now Fujitsu TurboSparc. It might happen that it is
+ * in Swift emulation mode, so we will check later...
+ */
+ if (psr_typ == 0 && psr_vers == 5) {
+ init_turbosparc();
+ return;
+ }
+
+ /* Next check for Fujitsu Swift. */
+ if(psr_typ == 0 && psr_vers == 4) {
+ int cpunode;
+ char node_str[128];
+
+		/* Check whether it is really a TurboSparc emulating Swift... */
+ cpunode = prom_getchild(prom_root_node);
+ while((cpunode = prom_getsibling(cpunode)) != 0) {
+ prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
+ if(!strcmp(node_str, "cpu")) {
+ if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
+ prom_getintdefault(cpunode, "psr-version", 1) == 5) {
+ init_turbosparc();
+ return;
+ }
+ break;
+ }
+ }
+
+ init_swift();
+ return;
+ }
+
+ /* Now the Viking family of srmmu. */
+ if(psr_typ == 4 &&
+ ((psr_vers == 0) ||
+ ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
+ init_viking();
+ return;
+ }
+
+ /* Finally the Tsunami. */
+ if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
+ init_tsunami();
+ return;
+ }
+
+ /* Oh well */
+ srmmu_is_bad();
+}
+
+/* don't laugh, static pagetables */
+static void srmmu_check_pgt_cache(int low, int high)
+{
+}
+
+extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
+ tsetup_mmu_patchme, rtrap_mmu_patchme;
+
+extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
+ tsetup_srmmu_stackchk, srmmu_rett_stackchk;
+
+extern unsigned long srmmu_fault;
+
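+/* Overwrite the placeholder instruction 'insn' with a branch to 'dest'. */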
+#define PATCH_BRANCH(insn, dest) do { \
+ iaddr = &(insn); \
+ daddr = &(dest); \
+ *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
+ } while(0)
+
+static void __init patch_window_trap_handlers(void)
+{
+ unsigned long *iaddr, *daddr;
+
+ PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
+ PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
+ PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
+ PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
+ PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
+ PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
+ PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
+}
+
+#ifdef CONFIG_SMP
+/* Local cross-calls. */
+static void smp_flush_page_for_dma(unsigned long page)
+{
+ xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
+ local_flush_page_for_dma(page);
+}
+
+#endif
+
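+/* Non-linear file mapping support: the file offset is kept in the PTE above
+ * SRMMU_PTE_FILE_SHIFT, with the software SRMMU_FILE bit marking such PTEs.
+ */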
+static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
+{
+ return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
+}
+
+static unsigned long srmmu_pte_to_pgoff(pte_t pte)
+{
+ return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
+}
+
+/* Load up routines and constants for sun4m and sun4d mmu */
+void __init ld_mmu_srmmu(void)
+{
+ extern void ld_mmu_iommu(void);
+ extern void ld_mmu_iounit(void);
+ extern void ___xchg32_sun4md(void);
+
+ BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
+ BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
+ BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
+
+ BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
+ BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
+
+ BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
+ BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+ pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
+
+ /* Functions */
+#ifndef CONFIG_SMP
+ BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
+#endif
+ BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
+ BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_SETHI(none_mask, 0xF0000000);
+
+ BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
+ BTFIXUPSET_CALL(pte_read, srmmu_pte_read, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
+
+ BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
+
+ BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
+ BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
+ BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
+ BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
+ BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
+ BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
+ BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
+ BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
+ BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
+ BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
+ BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
+ BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);
+
+ get_srmmu_type();
+ patch_window_trap_handlers();
+
+#ifdef CONFIG_SMP
+ /* El switcheroo... */
+
+ BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
+ BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
+ BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
+ BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
+ BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
+ BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
+ BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
+ BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
+ BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
+ BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
+ BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
+
+ BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
+ if (sparc_cpu_model != sun4d) {
+ BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+ }
+ BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
+#endif
+
+ if (sparc_cpu_model == sun4d)
+ ld_mmu_iounit();
+ else
+ ld_mmu_iommu();
+#ifdef CONFIG_SMP
+ if (sparc_cpu_model == sun4d)
+ sun4d_init_smp();
+ else
+ sun4m_init_smp();
+#endif
+}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
new file mode 100644
index 000000000000..1d560390e282
--- /dev/null
+++ b/arch/sparc/mm/sun4c.c
@@ -0,0 +1,2276 @@
+/* $Id: sun4c.c,v 1.212 2001/12/21 04:56:15 davem Exp $
+ * sun4c.c: Doing in software what should be done in hardware.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
+ * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define NR_TASK_BUCKETS 512
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/scatterlist.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/memreg.h>
+#include <asm/processor.h>
+#include <asm/auxio.h>
+#include <asm/io.h>
+#include <asm/oplib.h>
+#include <asm/openprom.h>
+#include <asm/mmu_context.h>
+#include <asm/sun4paddr.h>
+#include <asm/highmem.h>
+#include <asm/btfixup.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+/* Because of our dynamic kernel TLB miss strategy, and how
+ * our DVMA mapping allocation works, you _MUST_:
+ *
+ * 1) Disable interrupts _and_ not touch any dynamic kernel
+ * memory while messing with kernel MMU state. By
+ * dynamic memory I mean any object which is not in
+ * the kernel image itself or a thread_union (both of
+ * which are locked into the MMU).
+ * 2) Disable interrupts while messing with user MMU state.
+ */
+
+extern int num_segmaps, num_contexts;
+
+extern unsigned long page_kernel;
+
+#ifdef CONFIG_SUN4
+#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
+#else
+/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
+ * So let's save some cycles and just use that everywhere except for that bootup
+ * sanity check.
+ */
+#define SUN4C_VAC_SIZE 65536
+#endif
+
+#define SUN4C_KERNEL_BUCKETS 32
+
+/* Flushing the cache. */
+struct sun4c_vac_props sun4c_vacinfo;
+unsigned long sun4c_kernel_faults;
+
+/* Invalidate every sun4c cache line tag. */
+static void __init sun4c_flush_all(void)
+{
+ unsigned long begin, end;
+
+ if (sun4c_vacinfo.on)
+ panic("SUN4C: AIEEE, trying to invalidate vac while it is on.");
+
+ /* Clear 'valid' bit in all cache line tags */
+ begin = AC_CACHETAGS;
+ end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
+ while (begin < end) {
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (begin), "i" (ASI_CONTROL));
+ begin += sun4c_vacinfo.linesize;
+ }
+}
+
+static void sun4c_flush_context_hw(void)
+{
+ unsigned long end = SUN4C_VAC_SIZE;
+
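+	/* One store per 4096-byte step through the VAC, using the hardware
+	 * flush-context ASI.
+	 */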
+ __asm__ __volatile__(
+ "1: addcc %0, -4096, %0\n\t"
+ " bne 1b\n\t"
+ " sta %%g0, [%0] %2"
+ : "=&r" (end)
+ : "0" (end), "i" (ASI_HWFLUSHCONTEXT)
+ : "cc");
+}
+
+/* Must be called with at least IRQs disabled. */
+static void sun4c_flush_segment_hw(unsigned long addr)
+{
+ if (sun4c_get_segmap(addr) != invalid_segment) {
+ unsigned long vac_size = SUN4C_VAC_SIZE;
+
+ __asm__ __volatile__(
+ "1: addcc %0, -4096, %0\n\t"
+ " bne 1b\n\t"
+ " sta %%g0, [%2 + %0] %3"
+ : "=&r" (vac_size)
+ : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
+ : "cc");
+ }
+}
+
+/* File local boot time fixups. */
+BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long)
+BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long)
+BTFIXUPDEF_CALL(void, sun4c_flush_context, void)
+
+#define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr)
+#define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr)
+#define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)()
+
+/* Must be called with at least interrupts disabled. */
+static void sun4c_flush_page_hw(unsigned long addr)
+{
+ addr &= PAGE_MASK;
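+	/* Bit 31 of a sun4c PTE is the valid bit, so the cast-to-int test
+	 * below only issues the hardware page flush for valid translations.
+	 */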
+ if ((int)sun4c_get_pte(addr) < 0)
+ __asm__ __volatile__("sta %%g0, [%0] %1"
+ : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
+}
+
+/* Don't inline the software version as it eats too many cache lines if expanded. */
+static void sun4c_flush_context_sw(void)
+{
+ unsigned long nbytes = SUN4C_VAC_SIZE;
+ unsigned long lsize = sun4c_vacinfo.linesize;
+
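+	/* %g1-%o5 hold two through eight times the line size; each loop
+	 * iteration issues eight flush stores and steps down by eight lines.
+	 */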
+ __asm__ __volatile__(
+ "add %2, %2, %%g1\n\t"
+ "add %2, %%g1, %%g2\n\t"
+ "add %2, %%g2, %%g3\n\t"
+ "add %2, %%g3, %%g4\n\t"
+ "add %2, %%g4, %%g5\n\t"
+ "add %2, %%g5, %%o4\n\t"
+ "add %2, %%o4, %%o5\n"
+ "1:\n\t"
+ "subcc %0, %%o5, %0\n\t"
+ "sta %%g0, [%0] %3\n\t"
+ "sta %%g0, [%0 + %2] %3\n\t"
+ "sta %%g0, [%0 + %%g1] %3\n\t"
+ "sta %%g0, [%0 + %%g2] %3\n\t"
+ "sta %%g0, [%0 + %%g3] %3\n\t"
+ "sta %%g0, [%0 + %%g4] %3\n\t"
+ "sta %%g0, [%0 + %%g5] %3\n\t"
+ "bg 1b\n\t"
+ " sta %%g0, [%1 + %%o4] %3\n"
+ : "=&r" (nbytes)
+ : "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX)
+ : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
+}
+
+/* Don't inline the software version as it eats too many cache lines if expanded. */
+static void sun4c_flush_segment_sw(unsigned long addr)
+{
+ if (sun4c_get_segmap(addr) != invalid_segment) {
+ unsigned long nbytes = SUN4C_VAC_SIZE;
+ unsigned long lsize = sun4c_vacinfo.linesize;
+
+ __asm__ __volatile__(
+ "add %2, %2, %%g1\n\t"
+ "add %2, %%g1, %%g2\n\t"
+ "add %2, %%g2, %%g3\n\t"
+ "add %2, %%g3, %%g4\n\t"
+ "add %2, %%g4, %%g5\n\t"
+ "add %2, %%g5, %%o4\n\t"
+ "add %2, %%o4, %%o5\n"
+ "1:\n\t"
+ "subcc %1, %%o5, %1\n\t"
+ "sta %%g0, [%0] %6\n\t"
+ "sta %%g0, [%0 + %2] %6\n\t"
+ "sta %%g0, [%0 + %%g1] %6\n\t"
+ "sta %%g0, [%0 + %%g2] %6\n\t"
+ "sta %%g0, [%0 + %%g3] %6\n\t"
+ "sta %%g0, [%0 + %%g4] %6\n\t"
+ "sta %%g0, [%0 + %%g5] %6\n\t"
+ "sta %%g0, [%0 + %%o4] %6\n\t"
+ "bg 1b\n\t"
+ " add %0, %%o5, %0\n"
+ : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
+ : "0" (addr), "1" (nbytes), "2" (lsize),
+ "i" (ASI_FLUSHSEG)
+ : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
+ }
+}
+
+/* Don't inline the software version as it eats too many cache lines if expanded. */
+static void sun4c_flush_page_sw(unsigned long addr)
+{
+ addr &= PAGE_MASK;
+ if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
+ _SUN4C_PAGE_VALID) {
+ unsigned long left = PAGE_SIZE;
+ unsigned long lsize = sun4c_vacinfo.linesize;
+
+ __asm__ __volatile__(
+ "add %2, %2, %%g1\n\t"
+ "add %2, %%g1, %%g2\n\t"
+ "add %2, %%g2, %%g3\n\t"
+ "add %2, %%g3, %%g4\n\t"
+ "add %2, %%g4, %%g5\n\t"
+ "add %2, %%g5, %%o4\n\t"
+ "add %2, %%o4, %%o5\n"
+ "1:\n\t"
+ "subcc %1, %%o5, %1\n\t"
+ "sta %%g0, [%0] %6\n\t"
+ "sta %%g0, [%0 + %2] %6\n\t"
+ "sta %%g0, [%0 + %%g1] %6\n\t"
+ "sta %%g0, [%0 + %%g2] %6\n\t"
+ "sta %%g0, [%0 + %%g3] %6\n\t"
+ "sta %%g0, [%0 + %%g4] %6\n\t"
+ "sta %%g0, [%0 + %%g5] %6\n\t"
+ "sta %%g0, [%0 + %%o4] %6\n\t"
+ "bg 1b\n\t"
+ " add %0, %%o5, %0\n"
+ : "=&r" (addr), "=&r" (left), "=&r" (lsize)
+ : "0" (addr), "1" (left), "2" (lsize),
+ "i" (ASI_FLUSHPG)
+ : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
+ }
+}
+
+/* The sun4c's do have an on-chip store buffer, and the way you
+ * clear it out isn't so obvious. The only way I can think of
+ * to accomplish this is to read the current context register,
+ * store the same value there, then read an external hardware
+ * register.
+ */
+void sun4c_complete_all_stores(void)
+{
+ volatile int _unused;
+
+ _unused = sun4c_get_context();
+ sun4c_set_context(_unused);
+#ifdef CONFIG_SUN_AUXIO
+ _unused = get_auxio();
+#endif
+}
+
+/* Bootup utility functions. */
+static inline void sun4c_init_clean_segmap(unsigned char pseg)
+{
+ unsigned long vaddr;
+
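+	/* Temporarily map the pseg at virtual address 0, zero every PTE in
+	 * it, then mark the segment invalid again.
+	 */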
+ sun4c_put_segmap(0, pseg);
+ for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
+ sun4c_put_pte(vaddr, 0);
+ sun4c_put_segmap(0, invalid_segment);
+}
+
+static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
+{
+ unsigned long vaddr;
+ unsigned char savectx, ctx;
+
+ savectx = sun4c_get_context();
+ kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
+ for (ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ sun4c_put_segmap(vaddr, invalid_segment);
+ for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ sun4c_put_segmap(vaddr, invalid_segment);
+ for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ sun4c_put_segmap(vaddr, invalid_segment);
+ for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ sun4c_put_segmap(vaddr, invalid_segment);
+ }
+ sun4c_set_context(savectx);
+}
+
+void __init sun4c_probe_vac(void)
+{
+ sun4c_disable_vac();
+
+ if (ARCH_SUN4) {
+ switch (idprom->id_machtype) {
+
+ case (SM_SUN4|SM_4_110):
+ sun4c_vacinfo.type = VAC_NONE;
+ sun4c_vacinfo.num_bytes = 0;
+ sun4c_vacinfo.linesize = 0;
+ sun4c_vacinfo.do_hwflushes = 0;
+ prom_printf("No VAC. Get some bucks and buy a real computer.");
+ prom_halt();
+ break;
+
+ case (SM_SUN4|SM_4_260):
+ sun4c_vacinfo.type = VAC_WRITE_BACK;
+ sun4c_vacinfo.num_bytes = 128 * 1024;
+ sun4c_vacinfo.linesize = 16;
+ sun4c_vacinfo.do_hwflushes = 0;
+ break;
+
+ case (SM_SUN4|SM_4_330):
+ sun4c_vacinfo.type = VAC_WRITE_THROUGH;
+ sun4c_vacinfo.num_bytes = 128 * 1024;
+ sun4c_vacinfo.linesize = 16;
+ sun4c_vacinfo.do_hwflushes = 0;
+ break;
+
+ case (SM_SUN4|SM_4_470):
+ sun4c_vacinfo.type = VAC_WRITE_BACK;
+ sun4c_vacinfo.num_bytes = 128 * 1024;
+ sun4c_vacinfo.linesize = 32;
+ sun4c_vacinfo.do_hwflushes = 0;
+ break;
+
+ default:
+ prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype);
+ prom_halt();
+ };
+ } else {
+ sun4c_vacinfo.type = VAC_WRITE_THROUGH;
+
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+			/* PROM on SS1 lacks this info, so to be super safe we
+			 * hard-code it here since this arch is cast in stone.
+ */
+ sun4c_vacinfo.num_bytes = 65536;
+ sun4c_vacinfo.linesize = 16;
+ } else {
+ sun4c_vacinfo.num_bytes =
+ prom_getintdefault(prom_root_node, "vac-size", 65536);
+ sun4c_vacinfo.linesize =
+ prom_getintdefault(prom_root_node, "vac-linesize", 16);
+ }
+ sun4c_vacinfo.do_hwflushes =
+ prom_getintdefault(prom_root_node, "vac-hwflush", 0);
+
+ if (sun4c_vacinfo.do_hwflushes == 0)
+ sun4c_vacinfo.do_hwflushes =
+ prom_getintdefault(prom_root_node, "vac_hwflush", 0);
+
+ if (sun4c_vacinfo.num_bytes != 65536) {
+ prom_printf("WEIRD Sun4C VAC cache size, "
+ "tell sparclinux@vger.kernel.org");
+ prom_halt();
+ }
+ }
+
+ sun4c_vacinfo.num_lines =
+ (sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
+ switch (sun4c_vacinfo.linesize) {
+ case 16:
+ sun4c_vacinfo.log2lsize = 4;
+ break;
+ case 32:
+ sun4c_vacinfo.log2lsize = 5;
+ break;
+ default:
+ prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
+ sun4c_vacinfo.linesize);
+ prom_halt();
+ };
+
+ sun4c_flush_all();
+ sun4c_enable_vac();
+}
+
+/* Patch instructions for the low level kernel fault handler. */
+extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
+extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
+extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
+extern unsigned long num_context_patch1, num_context_patch1_16;
+extern unsigned long num_context_patch2_16;
+extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
+extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on;
+extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
+
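+/* PATCH_INSN copies a single instruction word from a template
+ * symbol (src) over the instruction it replaces (dst), so the
+ * fault handler gets patched in place for the number of segmaps,
+ * contexts and cache geometry actually probed above.
+ */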
+#define PATCH_INSN(src, dst) do { \
+ daddr = &(dst); \
+ iaddr = &(src); \
+ *daddr = *iaddr; \
+ } while (0)
+
+static void __init patch_kernel_fault_handler(void)
+{
+ unsigned long *iaddr, *daddr;
+
+ switch (num_segmaps) {
+ case 128:
+ /* Default, nothing to do. */
+ break;
+ case 256:
+ PATCH_INSN(invalid_segment_patch1_ff,
+ invalid_segment_patch1);
+ PATCH_INSN(invalid_segment_patch2_ff,
+ invalid_segment_patch2);
+ break;
+ case 512:
+ PATCH_INSN(invalid_segment_patch1_1ff,
+ invalid_segment_patch1);
+ PATCH_INSN(invalid_segment_patch2_1ff,
+ invalid_segment_patch2);
+ break;
+ default:
+ prom_printf("Unhandled number of segmaps: %d\n",
+ num_segmaps);
+ prom_halt();
+ };
+ switch (num_contexts) {
+ case 8:
+ /* Default, nothing to do. */
+ break;
+ case 16:
+ PATCH_INSN(num_context_patch1_16,
+ num_context_patch1);
+ break;
+ default:
+ prom_printf("Unhandled number of contexts: %d\n",
+ num_contexts);
+ prom_halt();
+ };
+
+ if (sun4c_vacinfo.do_hwflushes != 0) {
+ PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
+ PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
+ } else {
+ switch (sun4c_vacinfo.linesize) {
+ case 16:
+ /* Default, nothing to do. */
+ break;
+ case 32:
+ PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch);
+ break;
+ default:
+ prom_printf("Impossible VAC linesize %d, halting...\n",
+ sun4c_vacinfo.linesize);
+ prom_halt();
+ };
+ }
+}
+
+static void __init sun4c_probe_mmu(void)
+{
+ if (ARCH_SUN4) {
+ switch (idprom->id_machtype) {
+ case (SM_SUN4|SM_4_110):
+ prom_printf("No support for 4100 yet\n");
+ prom_halt();
+ num_segmaps = 256;
+ num_contexts = 8;
+ break;
+
+ case (SM_SUN4|SM_4_260):
+ /* should be 512 segmaps, when it gets fixed */
+ num_segmaps = 256;
+ num_contexts = 16;
+ break;
+
+ case (SM_SUN4|SM_4_330):
+ num_segmaps = 256;
+ num_contexts = 16;
+ break;
+
+ case (SM_SUN4|SM_4_470):
+ /* should be 1024 segmaps, when it gets fixed */
+ num_segmaps = 256;
+ num_contexts = 64;
+ break;
+ default:
+ prom_printf("Invalid SUN4 model\n");
+ prom_halt();
+ };
+ } else {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ /* Hardcode these just to be safe, PROM on SS1 does
+ * not have this info available in the root node.
+ */
+ num_segmaps = 128;
+ num_contexts = 8;
+ } else {
+ num_segmaps =
+ prom_getintdefault(prom_root_node, "mmu-npmg", 128);
+ num_contexts =
+ prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
+ }
+ }
+ patch_kernel_fault_handler();
+}
+
+volatile unsigned long *sun4c_memerr_reg = NULL;
+
+void __init sun4c_probe_memerr_reg(void)
+{
+ int node;
+ struct linux_prom_registers regs[1];
+
+ if (ARCH_SUN4) {
+ sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE);
+ } else {
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(prom_root_node, "memory-error");
+ if (!node)
+ return;
+ if (prom_getproperty(node, "reg", (char *)regs, sizeof(regs)) <= 0)
+ return;
+ /* hmm I think regs[0].which_io is zero here anyways */
+ sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
+ }
+}
+
+static inline void sun4c_init_ss2_cache_bug(void)
+{
+ extern unsigned long start;
+
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
+ (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
+ /* Whee.. */
+ printk("SS2 cache bug detected, uncaching trap table page\n");
+ sun4c_flush_page((unsigned int) &start);
+ sun4c_put_pte(((unsigned long) &start),
+ (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
+ }
+}
+
+/* Addr is always aligned on a page boundary for us already. */
+static int sun4c_map_dma_area(dma_addr_t *pba, unsigned long va,
+ unsigned long addr, int len)
+{
+ unsigned long page, end;
+
+ *pba = addr;
+
+ end = PAGE_ALIGN((addr + len));
+ while (addr < end) {
+ page = va;
+ sun4c_flush_page(page);
+ page -= PAGE_OFFSET;
+ page >>= PAGE_SHIFT;
+ page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY |
+ _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
+ sun4c_put_pte(addr, page);
+ addr += PAGE_SIZE;
+ va += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static struct page *sun4c_translate_dvma(unsigned long busa)
+{
+ /* Fortunately for us, bus_addr == uncached_virt in sun4c. */
+ unsigned long pte = sun4c_get_pte(busa);
+ return pfn_to_page(pte & SUN4C_PFN_MASK);
+}
+
+static void sun4c_unmap_dma_area(unsigned long busa, int len)
+{
+ /* Fortunately for us, bus_addr == uncached_virt in sun4c. */
+ /* XXX Implement this */
+}
+
+/* TLB management. */
+
+/* Don't change this struct without changing entry.S. This is used
+ * in the in-window kernel fault handler, and you don't want to mess
+ * with that. (See sun4c_fault in entry.S).
+ */
+struct sun4c_mmu_entry {
+ struct sun4c_mmu_entry *next;
+ struct sun4c_mmu_entry *prev;
+ unsigned long vaddr;
+ unsigned char pseg;
+ unsigned char locked;
+
+ /* For user mappings only, and completely hidden from kernel
+ * TLB miss code.
+ */
+ unsigned char ctx;
+ struct sun4c_mmu_entry *lru_next;
+ struct sun4c_mmu_entry *lru_prev;
+};
+
+static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
+
+static void __init sun4c_init_mmu_entry_pool(void)
+{
+ int i;
+
+ for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
+ mmu_entry_pool[i].pseg = i;
+ mmu_entry_pool[i].next = NULL;
+ mmu_entry_pool[i].prev = NULL;
+ mmu_entry_pool[i].vaddr = 0;
+ mmu_entry_pool[i].locked = 0;
+ mmu_entry_pool[i].ctx = 0;
+ mmu_entry_pool[i].lru_next = NULL;
+ mmu_entry_pool[i].lru_prev = NULL;
+ }
+ mmu_entry_pool[invalid_segment].locked = 1;
+}
+
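+/* Walk one real pgdir's worth of pages starting at vaddr and, for
+ * every valid pte in that range, set bits_on and clear bits_off.
+ */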
+static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
+ unsigned long bits_off)
+{
+ unsigned long start, end;
+
+ end = vaddr + SUN4C_REAL_PGDIR_SIZE;
+ for (start = vaddr; start < end; start += PAGE_SIZE)
+ if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
+ sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
+ ~bits_off);
+}
+
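+/* Lock down the segmaps backing the kadb/PROM window and the kernel
+ * image itself, replicate them into every context, and adjust the
+ * pte protections (PROM pages become privileged, kernel pages
+ * become privileged and cacheable).
+ */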
+static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
+{
+ unsigned long vaddr;
+ unsigned char pseg, ctx;
+#ifdef CONFIG_SUN4
+ /* sun4/110 and 260 have no kadb. */
+ if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
+ (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
+#endif
+ for (vaddr = KADB_DEBUGGER_BEGVM;
+ vaddr < LINUX_OPPROM_ENDVM;
+ vaddr += SUN4C_REAL_PGDIR_SIZE) {
+ pseg = sun4c_get_segmap(vaddr);
+ if (pseg != invalid_segment) {
+ mmu_entry_pool[pseg].locked = 1;
+ for (ctx = 0; ctx < num_contexts; ctx++)
+ prom_putsegment(ctx, vaddr, pseg);
+ fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
+ }
+ }
+#ifdef CONFIG_SUN4
+ }
+#endif
+ for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
+ pseg = sun4c_get_segmap(vaddr);
+ mmu_entry_pool[pseg].locked = 1;
+ for (ctx = 0; ctx < num_contexts; ctx++)
+ prom_putsegment(ctx, vaddr, pseg);
+ fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
+ }
+}
+
+static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
+{
+ int i, ctx;
+
+ while (start < end) {
+ for (i = 0; i < invalid_segment; i++)
+ if (!mmu_entry_pool[i].locked)
+ break;
+ mmu_entry_pool[i].locked = 1;
+ sun4c_init_clean_segmap(i);
+ for (ctx = 0; ctx < num_contexts; ctx++)
+ prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
+ start += SUN4C_REAL_PGDIR_SIZE;
+ }
+}
+
+/* Don't change this struct without changing entry.S. This is used
+ * in the in-window kernel fault handler, and you don't want to mess
+ * with that. (See sun4c_fault in entry.S).
+ */
+struct sun4c_mmu_ring {
+ struct sun4c_mmu_entry ringhd;
+ int num_entries;
+};
+
+static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
+static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */
+static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */
+struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */
+struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
+
+static inline void sun4c_init_rings(void)
+{
+ int i;
+
+ for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
+ sun4c_context_ring[i].ringhd.next =
+ sun4c_context_ring[i].ringhd.prev =
+ &sun4c_context_ring[i].ringhd;
+ sun4c_context_ring[i].num_entries = 0;
+ }
+ sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
+ &sun4c_ufree_ring.ringhd;
+ sun4c_ufree_ring.num_entries = 0;
+ sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
+ &sun4c_ulru_ring.ringhd;
+ sun4c_ulru_ring.num_entries = 0;
+ sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
+ &sun4c_kernel_ring.ringhd;
+ sun4c_kernel_ring.num_entries = 0;
+ sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev =
+ &sun4c_kfree_ring.ringhd;
+ sun4c_kfree_ring.num_entries = 0;
+}
+
+static void add_ring(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+
+ entry->prev = head;
+ (entry->next = head->next)->prev = entry;
+ head->next = entry;
+ ring->num_entries++;
+}
+
+static __inline__ void add_lru(struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+
+ entry->lru_next = head;
+ (entry->lru_prev = head->lru_prev)->lru_next = entry;
+ head->lru_prev = entry;
+}
+
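+/* Like add_ring(), but keep the per-context ring sorted by vaddr
+ * (the range flush routines rely on this ordering) and also place
+ * the entry on the global user LRU ring.
+ */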
+static void add_ring_ordered(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+ unsigned long addr = entry->vaddr;
+
+ while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
+ head = head->next;
+
+ entry->prev = head;
+ (entry->next = head->next)->prev = entry;
+ head->next = entry;
+ ring->num_entries++;
+
+ add_lru(entry);
+}
+
+static __inline__ void remove_ring(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *next = entry->next;
+
+ (next->prev = entry->prev)->next = next;
+ ring->num_entries--;
+}
+
+static void remove_lru(struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *next = entry->lru_next;
+
+ (next->lru_prev = entry->lru_prev)->lru_next = next;
+}
+
+static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+{
+ remove_ring(sun4c_context_ring+ctx, entry);
+ remove_lru(entry);
+ add_ring(&sun4c_ufree_ring, entry);
+}
+
+static void free_kernel_entry(struct sun4c_mmu_entry *entry,
+ struct sun4c_mmu_ring *ring)
+{
+ remove_ring(ring, entry);
+ add_ring(&sun4c_kfree_ring, entry);
+}
+
+static void __init sun4c_init_fill_kernel_ring(int howmany)
+{
+ int i;
+
+ while (howmany) {
+ for (i = 0; i < invalid_segment; i++)
+ if (!mmu_entry_pool[i].locked)
+ break;
+ mmu_entry_pool[i].locked = 1;
+ sun4c_init_clean_segmap(i);
+ add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]);
+ howmany--;
+ }
+}
+
+static void __init sun4c_init_fill_user_ring(void)
+{
+ int i;
+
+ for (i = 0; i < invalid_segment; i++) {
+ if (mmu_entry_pool[i].locked)
+ continue;
+ sun4c_init_clean_segmap(i);
+ add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
+ }
+}
+
+static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
+{
+ int savectx, ctx;
+
+ savectx = sun4c_get_context();
+ for (ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(kentry->vaddr, invalid_segment);
+ }
+ sun4c_set_context(savectx);
+}
+
+static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
+{
+ int savectx, ctx;
+
+ savectx = sun4c_get_context();
+ for (ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(kentry->vaddr, kentry->pseg);
+ }
+ sun4c_set_context(savectx);
+}
+
+#define sun4c_user_unmap(__entry) \
+ sun4c_put_segmap((__entry)->vaddr, invalid_segment)
+
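+/* Drop every user segmap owned by context ctx: flush the whole
+ * context, then unmap each entry and return it to the free ring.
+ */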
+static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
+{
+ struct sun4c_mmu_entry *head = &crp->ringhd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
+
+ flush_user_windows();
+ sun4c_set_context(ctx);
+ sun4c_flush_context();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
+
+ sun4c_user_unmap(entry);
+ free_user_entry(ctx, entry);
+
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ local_irq_restore(flags);
+}
+
+static int sun4c_user_taken_entries; /* This is how much we have. */
+static int max_user_taken_entries; /* This limits us and prevents deadlock. */
+
+static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
+{
+ struct sun4c_mmu_entry *this_entry;
+
+ /* If some are free, return first one. */
+ if (sun4c_kfree_ring.num_entries) {
+ this_entry = sun4c_kfree_ring.ringhd.next;
+ return this_entry;
+ }
+
+ /* Else free one up. */
+ this_entry = sun4c_kernel_ring.ringhd.prev;
+ sun4c_flush_segment(this_entry->vaddr);
+ sun4c_kernel_unmap(this_entry);
+ free_kernel_entry(this_entry, &sun4c_kernel_ring);
+ this_entry = sun4c_kfree_ring.ringhd.next;
+
+ return this_entry;
+}
+
+/* Using this method to free up mmu entries eliminates a lot of
+ * potential races since we have a kernel that incurs tlb
+ * replacement faults. There may be performance penalties.
+ *
+ * NOTE: Must be called with interrupts disabled.
+ */
+static struct sun4c_mmu_entry *sun4c_user_strategy(void)
+{
+ struct sun4c_mmu_entry *entry;
+ unsigned char ctx;
+ int savectx;
+
+ /* If some are free, return first one. */
+ if (sun4c_ufree_ring.num_entries) {
+ entry = sun4c_ufree_ring.ringhd.next;
+ goto unlink_out;
+ }
+
+ if (sun4c_user_taken_entries) {
+ entry = sun4c_kernel_strategy();
+ sun4c_user_taken_entries--;
+ goto kunlink_out;
+ }
+
+ /* Grab from the beginning of the LRU list. */
+ entry = sun4c_ulru_ring.ringhd.lru_next;
+ ctx = entry->ctx;
+
+ savectx = sun4c_get_context();
+ flush_user_windows();
+ sun4c_set_context(ctx);
+ sun4c_flush_segment(entry->vaddr);
+ sun4c_user_unmap(entry);
+ remove_ring(sun4c_context_ring + ctx, entry);
+ remove_lru(entry);
+ sun4c_set_context(savectx);
+
+ return entry;
+
+unlink_out:
+ remove_ring(&sun4c_ufree_ring, entry);
+ return entry;
+kunlink_out:
+ remove_ring(&sun4c_kfree_ring, entry);
+ return entry;
+}
+
+/* NOTE: Must be called with interrupts disabled. */
+void sun4c_grow_kernel_ring(void)
+{
+ struct sun4c_mmu_entry *entry;
+
+ /* Prevent deadlock condition. */
+ if (sun4c_user_taken_entries >= max_user_taken_entries)
+ return;
+
+ if (sun4c_ufree_ring.num_entries) {
+ entry = sun4c_ufree_ring.ringhd.next;
+ remove_ring(&sun4c_ufree_ring, entry);
+ add_ring(&sun4c_kfree_ring, entry);
+ sun4c_user_taken_entries++;
+ }
+}
+
+/* 2 page buckets for task struct and kernel stack allocation.
+ *
+ * TASK_STACK_BEGIN
+ * bucket[0]
+ * bucket[1]
+ * [ ... ]
+ * bucket[NR_TASK_BUCKETS-1]
+ * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
+ *
+ * Each slot looks like:
+ *
+ * page 1 -- task struct + beginning of kernel stack
+ * page 2 -- rest of kernel stack
+ */
+
+union task_union *sun4c_bucket[NR_TASK_BUCKETS];
+
+static int sun4c_lowbucket_avail;
+
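+/* Bucket helpers: each bucket covers two pages (BUCKET_SHIFT is
+ * PAGE_SHIFT + 1).  BUCKET_NUM/BUCKET_ADDR convert between a locked
+ * virtual address and its bucket index, and BUCKET_PTE turns the
+ * kernel virtual address of a backing page into a kernel pte.
+ */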
+#define BUCKET_EMPTY ((union task_union *) 0)
+#define BUCKET_SHIFT (PAGE_SHIFT + 1) /* log2(sizeof(struct task_bucket)) */
+#define BUCKET_SIZE (1 << BUCKET_SHIFT)
+#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
+#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
+#define BUCKET_PTE(page) \
+ ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
+#define BUCKET_PTE_PAGE(pte) \
+ (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
+
+static void get_locked_segment(unsigned long addr)
+{
+ struct sun4c_mmu_entry *stolen;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ addr &= SUN4C_REAL_PGDIR_MASK;
+ stolen = sun4c_user_strategy();
+ max_user_taken_entries--;
+ stolen->vaddr = addr;
+ flush_user_windows();
+ sun4c_kernel_map(stolen);
+ local_irq_restore(flags);
+}
+
+static void free_locked_segment(unsigned long addr)
+{
+ struct sun4c_mmu_entry *entry;
+ unsigned long flags;
+ unsigned char pseg;
+
+ local_irq_save(flags);
+ addr &= SUN4C_REAL_PGDIR_MASK;
+ pseg = sun4c_get_segmap(addr);
+ entry = &mmu_entry_pool[pseg];
+
+ flush_user_windows();
+ sun4c_flush_segment(addr);
+ sun4c_kernel_unmap(entry);
+ add_ring(&sun4c_ufree_ring, entry);
+ max_user_taken_entries++;
+ local_irq_restore(flags);
+}
+
+static inline void garbage_collect(int entry)
+{
+ int start, end;
+
+ /* 32 buckets per segment... */
+ entry &= ~31;
+ start = entry;
+ for (end = (start + 32); start < end; start++)
+ if (sun4c_bucket[start] != BUCKET_EMPTY)
+ return;
+
+ /* Entire segment empty, release it. */
+ free_locked_segment(BUCKET_ADDR(entry));
+}
+
+static struct thread_info *sun4c_alloc_thread_info(void)
+{
+ unsigned long addr, pages;
+ int entry;
+
+ pages = __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
+ if (!pages)
+ return NULL;
+
+ for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
+ if (sun4c_bucket[entry] == BUCKET_EMPTY)
+ break;
+ if (entry == NR_TASK_BUCKETS) {
+ free_pages(pages, THREAD_INFO_ORDER);
+ return NULL;
+ }
+ if (entry >= sun4c_lowbucket_avail)
+ sun4c_lowbucket_avail = entry + 1;
+
+ addr = BUCKET_ADDR(entry);
+ sun4c_bucket[entry] = (union task_union *) addr;
+ if(sun4c_get_segmap(addr) == invalid_segment)
+ get_locked_segment(addr);
+
+ /* We are changing the virtual color of the page(s)
+ * so we must flush the cache to guarantee consistency.
+ */
+ sun4c_flush_page(pages);
+#ifndef CONFIG_SUN4
+ sun4c_flush_page(pages + PAGE_SIZE);
+#endif
+
+ sun4c_put_pte(addr, BUCKET_PTE(pages));
+#ifndef CONFIG_SUN4
+ sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
+#endif
+
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset((void *)addr, 0, PAGE_SIZE << THREAD_INFO_ORDER);
+#endif /* DEBUG_STACK_USAGE */
+
+ return (struct thread_info *) addr;
+}
+
+static void sun4c_free_thread_info(struct thread_info *ti)
+{
+ unsigned long tiaddr = (unsigned long) ti;
+ unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tiaddr));
+ int entry = BUCKET_NUM(tiaddr);
+
+ /* We are deleting a mapping, so the flush here is mandatory. */
+ sun4c_flush_page(tiaddr);
+#ifndef CONFIG_SUN4
+ sun4c_flush_page(tiaddr + PAGE_SIZE);
+#endif
+ sun4c_put_pte(tiaddr, 0);
+#ifndef CONFIG_SUN4
+ sun4c_put_pte(tiaddr + PAGE_SIZE, 0);
+#endif
+ sun4c_bucket[entry] = BUCKET_EMPTY;
+ if (entry < sun4c_lowbucket_avail)
+ sun4c_lowbucket_avail = entry;
+
+ free_pages(pages, THREAD_INFO_ORDER);
+ garbage_collect(entry);
+}
+
+static void __init sun4c_init_buckets(void)
+{
+ int entry;
+
+ if (sizeof(union thread_union) != (PAGE_SIZE << THREAD_INFO_ORDER)) {
+ extern void thread_info_size_is_bolixed_pete(void);
+ thread_info_size_is_bolixed_pete();
+ }
+
+ for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
+ sun4c_bucket[entry] = BUCKET_EMPTY;
+ sun4c_lowbucket_avail = 0;
+}
+
+static unsigned long sun4c_iobuffer_start;
+static unsigned long sun4c_iobuffer_end;
+static unsigned long sun4c_iobuffer_high;
+static unsigned long *sun4c_iobuffer_map;
+static int iobuffer_map_size;
+
+/*
+ * Alias our pages so they do not cause a trap.
+ * Note that a single page may be aliased into several I/O areas
+ * and those I/Os may finish separately.
+ */
+static char *sun4c_lockarea(char *vaddr, unsigned long size)
+{
+ unsigned long base, scan;
+ unsigned long npages;
+ unsigned long vpage;
+ unsigned long pte;
+ unsigned long apage;
+ unsigned long high;
+ unsigned long flags;
+
+ npages = (((unsigned long)vaddr & ~PAGE_MASK) +
+ size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+ scan = 0;
+ local_irq_save(flags);
+ for (;;) {
+ scan = find_next_zero_bit(sun4c_iobuffer_map,
+ iobuffer_map_size, scan);
+ if ((base = scan) + npages > iobuffer_map_size) goto abend;
+ for (;;) {
+ if (scan >= base + npages) goto found;
+ if (test_bit(scan, sun4c_iobuffer_map)) break;
+ scan++;
+ }
+ }
+
+found:
+ high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
+ high = SUN4C_REAL_PGDIR_ALIGN(high);
+ while (high > sun4c_iobuffer_high) {
+ get_locked_segment(sun4c_iobuffer_high);
+ sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
+ }
+
+ vpage = ((unsigned long) vaddr) & PAGE_MASK;
+ for (scan = base; scan < base+npages; scan++) {
+ pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
+ pte |= pgprot_val(SUN4C_PAGE_KERNEL);
+ pte |= _SUN4C_PAGE_NOCACHE;
+ set_bit(scan, sun4c_iobuffer_map);
+ apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;
+
+ /* Flush original mapping so we see the right things later. */
+ sun4c_flush_page(vpage);
+
+ sun4c_put_pte(apage, pte);
+ vpage += PAGE_SIZE;
+ }
+ local_irq_restore(flags);
+ return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
+ (((unsigned long) vaddr) & ~PAGE_MASK));
+
+abend:
+ local_irq_restore(flags);
+ printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
+ panic("Out of iobuffer table");
+ return NULL;
+}
+
+static void sun4c_unlockarea(char *vaddr, unsigned long size)
+{
+ unsigned long vpage, npages;
+ unsigned long flags;
+ int scan, high;
+
+ vpage = (unsigned long)vaddr & PAGE_MASK;
+ npages = (((unsigned long)vaddr & ~PAGE_MASK) +
+ size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+ local_irq_save(flags);
+ while (npages != 0) {
+ --npages;
+
+ /* This mapping is marked non-cachable, no flush necessary. */
+ sun4c_put_pte(vpage, 0);
+ clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
+ sun4c_iobuffer_map);
+ vpage += PAGE_SIZE;
+ }
+
+ /* garbage collect */
+ scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
+ while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
+ scan -= 32;
+ scan += 32;
+ high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
+ high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
+ while (high < sun4c_iobuffer_high) {
+ sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
+ free_locked_segment(sun4c_iobuffer_high);
+ }
+ local_irq_restore(flags);
+}
+
+/* Note: at init time the SCSI code passes us buffers which
+ * sit on the kernel stack.  Those are already locked by
+ * implication and would fool the page locking code above if
+ * handed to it by mistake.
+ */
+static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus)
+{
+ unsigned long page;
+
+ page = ((unsigned long)bufptr) & PAGE_MASK;
+ if (!virt_addr_valid(page)) {
+ sun4c_flush_page(page);
+ return (__u32)bufptr; /* already locked */
+ }
+ return (__u32)sun4c_lockarea(bufptr, len);
+}
+
+static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ while (sz != 0) {
+ --sz;
+ sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
+ sg[sz].dvma_length = sg[sz].length;
+ }
+}
+
+static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus)
+{
+ if (bufptr < sun4c_iobuffer_start)
+ return; /* On kernel stack or similar, see above */
+ sun4c_unlockarea((char *)bufptr, len);
+}
+
+static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
+{
+ while (sz != 0) {
+ --sz;
+ sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
+ }
+}
+
+#define TASK_ENTRY_SIZE BUCKET_SIZE /* see above */
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+struct vm_area_struct sun4c_kstack_vma;
+
+static void __init sun4c_init_lock_areas(void)
+{
+ unsigned long sun4c_taskstack_start;
+ unsigned long sun4c_taskstack_end;
+ int bitmap_size;
+
+ sun4c_init_buckets();
+ sun4c_taskstack_start = SUN4C_LOCK_VADDR;
+ sun4c_taskstack_end = (sun4c_taskstack_start +
+ (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
+ if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
+ prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
+ prom_halt();
+ }
+
+ sun4c_iobuffer_start = sun4c_iobuffer_high =
+ SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
+ sun4c_iobuffer_end = SUN4C_LOCK_END;
+ bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
+ bitmap_size = (bitmap_size + 7) >> 3;
+ bitmap_size = LONG_ALIGN(bitmap_size);
+ iobuffer_map_size = bitmap_size << 3;
+ sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
+ memset((void *) sun4c_iobuffer_map, 0, bitmap_size);
+
+ sun4c_kstack_vma.vm_mm = &init_mm;
+ sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
+ sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
+ sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
+ sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+ insert_vm_struct(&init_mm, &sun4c_kstack_vma);
+}
+
+/* Cache flushing on the sun4c. */
+static void sun4c_flush_cache_all(void)
+{
+ unsigned long begin, end;
+
+ flush_user_windows();
+ begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
+ end = (begin + SUN4C_VAC_SIZE);
+
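+ /* Flush by displacement: read one word from every cache line in
+ * a SUN4C_VAC_SIZE window of kernel address space, replacing
+ * whatever the VAC currently holds.
+ */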
+ if (sun4c_vacinfo.linesize == 32) {
+ while (begin < end) {
+ __asm__ __volatile__(
+ "ld [%0 + 0x00], %%g0\n\t"
+ "ld [%0 + 0x20], %%g0\n\t"
+ "ld [%0 + 0x40], %%g0\n\t"
+ "ld [%0 + 0x60], %%g0\n\t"
+ "ld [%0 + 0x80], %%g0\n\t"
+ "ld [%0 + 0xa0], %%g0\n\t"
+ "ld [%0 + 0xc0], %%g0\n\t"
+ "ld [%0 + 0xe0], %%g0\n\t"
+ "ld [%0 + 0x100], %%g0\n\t"
+ "ld [%0 + 0x120], %%g0\n\t"
+ "ld [%0 + 0x140], %%g0\n\t"
+ "ld [%0 + 0x160], %%g0\n\t"
+ "ld [%0 + 0x180], %%g0\n\t"
+ "ld [%0 + 0x1a0], %%g0\n\t"
+ "ld [%0 + 0x1c0], %%g0\n\t"
+ "ld [%0 + 0x1e0], %%g0\n"
+ : : "r" (begin));
+ begin += 512;
+ }
+ } else {
+ while (begin < end) {
+ __asm__ __volatile__(
+ "ld [%0 + 0x00], %%g0\n\t"
+ "ld [%0 + 0x10], %%g0\n\t"
+ "ld [%0 + 0x20], %%g0\n\t"
+ "ld [%0 + 0x30], %%g0\n\t"
+ "ld [%0 + 0x40], %%g0\n\t"
+ "ld [%0 + 0x50], %%g0\n\t"
+ "ld [%0 + 0x60], %%g0\n\t"
+ "ld [%0 + 0x70], %%g0\n\t"
+ "ld [%0 + 0x80], %%g0\n\t"
+ "ld [%0 + 0x90], %%g0\n\t"
+ "ld [%0 + 0xa0], %%g0\n\t"
+ "ld [%0 + 0xb0], %%g0\n\t"
+ "ld [%0 + 0xc0], %%g0\n\t"
+ "ld [%0 + 0xd0], %%g0\n\t"
+ "ld [%0 + 0xe0], %%g0\n\t"
+ "ld [%0 + 0xf0], %%g0\n"
+ : : "r" (begin));
+ begin += 256;
+ }
+ }
+}
+
+static void sun4c_flush_cache_mm(struct mm_struct *mm)
+{
+ int new_ctx = mm->context;
+
+ if (new_ctx != NO_CONTEXT) {
+ flush_user_windows();
+
+ if (sun4c_context_ring[new_ctx].num_entries) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
+
+ sun4c_set_context(new_ctx);
+ sun4c_flush_context();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
+
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
+
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ local_irq_restore(flags);
+ }
+ }
+}
+
+static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int new_ctx = mm->context;
+
+ if (new_ctx != NO_CONTEXT) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ struct sun4c_mmu_entry *entry;
+ unsigned long flags;
+
+ flush_user_windows();
+
+ local_irq_save(flags);
+ /* All user segmap chains are ordered on entry->vaddr. */
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
+ ;
+
+ /* Tracing various job mixtures showed that this conditional
+ * only passes ~35% of the time even in most worst-case situations,
+ * therefore we avoid all of this gross overhead ~65% of the time.
+ */
+ if ((entry != head) && (entry->vaddr < end)) {
+ int octx = sun4c_get_context();
+ sun4c_set_context(new_ctx);
+
+ /* At this point, always, (start >= entry->vaddr) and
+ * (entry->vaddr < end); once the latter condition ceases
+ * to hold, or we hit the end of the list, we exit the
+ * loop. The ordering of all user allocated segmaps makes
+ * this all work out so beautifully.
+ */
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
+ unsigned long realend;
+
+ /* "realstart" is always >= entry->vaddr */
+ realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
+ if (end < realend)
+ realend = end;
+ if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
+ unsigned long page = entry->vaddr;
+ while (page < realend) {
+ sun4c_flush_page(page);
+ page += PAGE_SIZE;
+ }
+ } else {
+ sun4c_flush_segment(entry->vaddr);
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
+ }
+ entry = next;
+ } while ((entry != head) && (entry->vaddr < end));
+ sun4c_set_context(octx);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int new_ctx = mm->context;
+
+ /* Sun4c has no separate I/D caches, so we cannot optimize
+ * non-text page flushes.
+ */
+ if (new_ctx != NO_CONTEXT) {
+ int octx = sun4c_get_context();
+ unsigned long flags;
+
+ flush_user_windows();
+ local_irq_save(flags);
+ sun4c_set_context(new_ctx);
+ sun4c_flush_page(page);
+ sun4c_set_context(octx);
+ local_irq_restore(flags);
+ }
+}
+
+static void sun4c_flush_page_to_ram(unsigned long page)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sun4c_flush_page(page);
+ local_irq_restore(flags);
+}
+
+/* Sun4c cache is unified, both instructions and data live there, so
+ * no need to flush the on-stack instructions for new signal handlers.
+ */
+static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+}
+
+/* TLB flushing on the sun4c. These routines count on the cache
+ * flushing code to flush the user register windows so that we need
+ * not do so when we get here.
+ */
+
+static void sun4c_flush_tlb_all(void)
+{
+ struct sun4c_mmu_entry *this_entry, *next_entry;
+ unsigned long flags;
+ int savectx, ctx;
+
+ local_irq_save(flags);
+ this_entry = sun4c_kernel_ring.ringhd.next;
+ savectx = sun4c_get_context();
+ flush_user_windows();
+ while (sun4c_kernel_ring.num_entries) {
+ next_entry = this_entry->next;
+ sun4c_flush_segment(this_entry->vaddr);
+ for (ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(this_entry->vaddr, invalid_segment);
+ }
+ free_kernel_entry(this_entry, &sun4c_kernel_ring);
+ this_entry = next_entry;
+ }
+ sun4c_set_context(savectx);
+ local_irq_restore(flags);
+}
+
+static void sun4c_flush_tlb_mm(struct mm_struct *mm)
+{
+ int new_ctx = mm->context;
+
+ if (new_ctx != NO_CONTEXT) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
+
+ sun4c_set_context(new_ctx);
+ sun4c_flush_context();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
+
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
+
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int new_ctx = mm->context;
+
+ if (new_ctx != NO_CONTEXT) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ struct sun4c_mmu_entry *entry;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ /* See commentary in sun4c_flush_cache_range(). */
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
+ ;
+
+ if ((entry != head) && (entry->vaddr < end)) {
+ int octx = sun4c_get_context();
+
+ sun4c_set_context(new_ctx);
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
+
+ sun4c_flush_segment(entry->vaddr);
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
+
+ entry = next;
+ } while ((entry != head) && (entry->vaddr < end));
+ sun4c_set_context(octx);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int new_ctx = mm->context;
+
+ if (new_ctx != NO_CONTEXT) {
+ int savectx = sun4c_get_context();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sun4c_set_context(new_ctx);
+ page &= PAGE_MASK;
+ sun4c_flush_page(page);
+ sun4c_put_pte(page, 0);
+ sun4c_set_context(savectx);
+ local_irq_restore(flags);
+ }
+}
+
+static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
+{
+ unsigned long page_entry;
+
+ page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
+ page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
+ sun4c_put_pte(virt_addr, page_entry);
+}
+
+static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
+ unsigned long xva, unsigned int len)
+{
+ while (len != 0) {
+ len -= PAGE_SIZE;
+ sun4c_mapioaddr(xpa, xva);
+ xva += PAGE_SIZE;
+ xpa += PAGE_SIZE;
+ }
+}
+
+static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
+{
+ while (len != 0) {
+ len -= PAGE_SIZE;
+ sun4c_put_pte(virt_addr, 0);
+ virt_addr += PAGE_SIZE;
+ }
+}
+
+static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
+{
+ struct ctx_list *ctxp;
+
+ ctxp = ctx_free.next;
+ if (ctxp != &ctx_free) {
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ mm->context = ctxp->ctx_number;
+ ctxp->ctx_mm = mm;
+ return;
+ }
+ ctxp = ctx_used.next;
+ if (ctxp->ctx_mm == old_mm)
+ ctxp = ctxp->next;
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ ctxp->ctx_mm->context = NO_CONTEXT;
+ ctxp->ctx_mm = mm;
+ mm->context = ctxp->ctx_number;
+ sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
+ ctxp->ctx_number);
+}
+
+/* Switch the current MM context. */
+static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
+{
+ struct ctx_list *ctx;
+ int dirty = 0;
+
+ if (mm->context == NO_CONTEXT) {
+ dirty = 1;
+ sun4c_alloc_context(old_mm, mm);
+ } else {
+ /* Update the LRU ring of contexts. */
+ ctx = ctx_list_pool + mm->context;
+ remove_from_ctx_list(ctx);
+ add_to_used_ctxlist(ctx);
+ }
+ if (dirty || old_mm != mm)
+ sun4c_set_context(mm->context);
+}
+
+static void sun4c_destroy_context(struct mm_struct *mm)
+{
+ struct ctx_list *ctx_old;
+
+ if (mm->context != NO_CONTEXT) {
+ sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
+ ctx_old = ctx_list_pool + mm->context;
+ remove_from_ctx_list(ctx_old);
+ add_to_free_ctxlist(ctx_old);
+ mm->context = NO_CONTEXT;
+ }
+}
+
+static void sun4c_mmu_info(struct seq_file *m)
+{
+ int used_user_entries, i;
+
+ used_user_entries = 0;
+ for (i = 0; i < num_contexts; i++)
+ used_user_entries += sun4c_context_ring[i].num_entries;
+
+ seq_printf(m,
+ "vacsize\t\t: %d bytes\n"
+ "vachwflush\t: %s\n"
+ "vaclinesize\t: %d bytes\n"
+ "mmuctxs\t\t: %d\n"
+ "mmupsegs\t: %d\n"
+ "kernelpsegs\t: %d\n"
+ "kfreepsegs\t: %d\n"
+ "usedpsegs\t: %d\n"
+ "ufreepsegs\t: %d\n"
+ "user_taken\t: %d\n"
+ "max_taken\t: %d\n",
+ sun4c_vacinfo.num_bytes,
+ (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
+ sun4c_vacinfo.linesize,
+ num_contexts,
+ (invalid_segment + 1),
+ sun4c_kernel_ring.num_entries,
+ sun4c_kfree_ring.num_entries,
+ used_user_entries,
+ sun4c_ufree_ring.num_entries,
+ sun4c_user_taken_entries,
+ max_user_taken_entries);
+}
+
+/* Nothing below here should touch the mmu hardware nor the mmu_entry
+ * data structures.
+ */
+
+/* First the functions which the mid-level code uses to directly
+ * manipulate the software page tables. Some defines since we are
+ * emulating the i386 page directory layout.
+ */
+#define PGD_PRESENT 0x001
+#define PGD_RW 0x002
+#define PGD_USER 0x004
+#define PGD_ACCESSED 0x020
+#define PGD_DIRTY 0x040
+#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
+
+static void sun4c_set_pte(pte_t *ptep, pte_t pte)
+{
+ *ptep = pte;
+}
+
+static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{
+}
+
+static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
+{
+ pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
+}
+
+static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
+{
+ if (page_address(ptep) == NULL) BUG(); /* No highmem on sun4c */
+ pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
+}
+
+static int sun4c_pte_present(pte_t pte)
+{
+ return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
+}
+static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }
+
+static int sun4c_pte_read(pte_t pte)
+{
+ return (pte_val(pte) & _SUN4C_PAGE_READ);
+}
+
+static int sun4c_pmd_bad(pmd_t pmd)
+{
+ return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
+ (!virt_addr_valid(pmd_val(pmd))));
+}
+
+static int sun4c_pmd_present(pmd_t pmd)
+{
+ return ((pmd_val(pmd) & PGD_PRESENT) != 0);
+}
+
+#if 0 /* if PMD takes one word */
+static void sun4c_pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); }
+#else /* if pmd_t is a longish aggregate */
+static void sun4c_pmd_clear(pmd_t *pmdp) {
+ memset((void *)pmdp, 0, sizeof(pmd_t));
+}
+#endif
+
+static int sun4c_pgd_none(pgd_t pgd) { return 0; }
+static int sun4c_pgd_bad(pgd_t pgd) { return 0; }
+static int sun4c_pgd_present(pgd_t pgd) { return 1; }
+static void sun4c_pgd_clear(pgd_t * pgdp) { }
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static pte_t sun4c_pte_mkwrite(pte_t pte)
+{
+ pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
+ if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
+ pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
+ return pte;
+}
+
+static pte_t sun4c_pte_mkdirty(pte_t pte)
+{
+ pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
+ if (pte_val(pte) & _SUN4C_PAGE_WRITE)
+ pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
+ return pte;
+}
+
+static pte_t sun4c_pte_mkyoung(pte_t pte)
+{
+ pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
+ if (pte_val(pte) & _SUN4C_PAGE_READ)
+ pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
+ return pte;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
+{
+ return __pte(page_to_pfn(page) | pgprot_val(pgprot));
+}
+
+static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
+{
+ return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{
+ return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+static unsigned long sun4c_pte_pfn(pte_t pte)
+{
+ return pte_val(pte) & SUN4C_PFN_MASK;
+}
+
+static pte_t sun4c_pgoff_to_pte(unsigned long pgoff)
+{
+ return __pte(pgoff | _SUN4C_PAGE_FILE);
+}
+
+static unsigned long sun4c_pte_to_pgoff(pte_t pte)
+{
+ return pte_val(pte) & ((1UL << PTE_FILE_MAX_BITS) - 1);
+}
+
+
+static __inline__ unsigned long sun4c_pmd_page_v(pmd_t pmd)
+{
+ return (pmd_val(pmd) & PAGE_MASK);
+}
+
+static struct page *sun4c_pmd_page(pmd_t pmd)
+{
+ return virt_to_page(sun4c_pmd_page_v(pmd));
+}
+
+static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; }
+
+/* to find an entry in a page-table-directory */
+static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+ return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
+}
+
+/* Find an entry in the second-level page table.. */
+static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+
+/* Find an entry in the third-level page table.. */
+pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
+{
+ return (pte_t *) sun4c_pmd_page_v(*dir) +
+ ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
+}
+
+static unsigned long sun4c_swp_type(swp_entry_t entry)
+{
+ return (entry.val & SUN4C_SWP_TYPE_MASK);
+}
+
+static unsigned long sun4c_swp_offset(swp_entry_t entry)
+{
+ return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
+}
+
+static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
+{
+ return (swp_entry_t) {
+ (offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
+ | (type & SUN4C_SWP_TYPE_MASK) };
+}
+
+static void sun4c_free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static void sun4c_free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+static pgd_t *sun4c_get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if ((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else {
+ pgd_t *init;
+
+ ret = (unsigned long *)__get_free_page(GFP_KERNEL);
+ memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
+ init = sun4c_pgd_offset(&init_mm, 0);
+ memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return (pgd_t *)ret;
+}
+
+static void sun4c_free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+
+static __inline__ pte_t *
+sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+{
+ unsigned long *ret;
+
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte;
+
+ if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
+ return pte;
+
+ pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+ if (pte)
+ memset(pte, 0, PAGE_SIZE);
+ return pte;
+}
+
+static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address);
+ if (pte == NULL)
+ return NULL;
+ return virt_to_page(pte);
+}
+
+static __inline__ void sun4c_free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+static void sun4c_pte_free(struct page *pte)
+{
+ sun4c_free_pte_fast(page_address(pte));
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ BUG();
+ return NULL;
+}
+
+static void sun4c_free_pmd_fast(pmd_t * pmd) { }
+
+static void sun4c_check_pgt_cache(int low, int high)
+{
+ if (pgtable_cache_size > high) {
+ do {
+ if (pgd_quicklist)
+ sun4c_free_pgd_slow(sun4c_get_pgd_fast());
+ if (pte_quicklist)
+ sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0));
+ } while (pgtable_cache_size > low);
+ }
+}
+
+/* An experiment, turn off by default for now... -DaveM */
+#define SUN4C_PRELOAD_PSEG
+
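+/* Called after the generic mm code has updated a user pte.  If no
+ * pseg backs this address yet, grab one via sun4c_user_strategy(),
+ * link it into the context ring and (with SUN4C_PRELOAD_PSEG) load
+ * every present pte of the segment into the MMU; otherwise just
+ * refresh the entry's LRU position and install the new pte.
+ */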
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ int pseg;
+
+ local_irq_save(flags);
+ address &= PAGE_MASK;
+ if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
+ struct sun4c_mmu_entry *entry = sun4c_user_strategy();
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long start, end;
+
+ entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
+ entry->ctx = mm->context;
+ add_ring_ordered(sun4c_context_ring + mm->context, entry);
+ sun4c_put_segmap(entry->vaddr, entry->pseg);
+ end = start + SUN4C_REAL_PGDIR_SIZE;
+ while (start < end) {
+#ifdef SUN4C_PRELOAD_PSEG
+ pgd_t *pgdp = sun4c_pgd_offset(mm, start);
+ pte_t *ptep;
+
+ if (!pgdp)
+ goto no_mapping;
+ ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, start);
+ if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
+ goto no_mapping;
+ sun4c_put_pte(start, pte_val(*ptep));
+ goto next;
+
+ no_mapping:
+#endif
+ sun4c_put_pte(start, 0);
+#ifdef SUN4C_PRELOAD_PSEG
+ next:
+#endif
+ start += PAGE_SIZE;
+ }
+#ifndef SUN4C_PRELOAD_PSEG
+ sun4c_put_pte(address, pte_val(pte));
+#endif
+ local_irq_restore(flags);
+ return;
+ } else {
+ struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
+
+ remove_lru(entry);
+ add_lru(entry);
+ }
+
+ sun4c_put_pte(address, pte_val(pte));
+ local_irq_restore(flags);
+}
+
+extern void sparc_context_init(int);
+extern unsigned long end;
+extern unsigned long bootmem_init(unsigned long *pages_avail);
+extern unsigned long last_valid_pfn;
+
+void __init sun4c_paging_init(void)
+{
+ int i, cnt;
+ unsigned long kernel_end, vaddr;
+ extern struct resource sparc_iomap;
+ unsigned long end_pfn, pages_avail;
+
+ kernel_end = (unsigned long) &end;
+ kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4);
+ kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
+
+ pages_avail = 0;
+ last_valid_pfn = bootmem_init(&pages_avail);
+ end_pfn = last_valid_pfn;
+
+ sun4c_probe_mmu();
+ invalid_segment = (num_segmaps - 1);
+ sun4c_init_mmu_entry_pool();
+ sun4c_init_rings();
+ sun4c_init_map_kernelprom(kernel_end);
+ sun4c_init_clean_mmu(kernel_end);
+ sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
+ sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
+ sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
+ sun4c_init_lock_areas();
+ sun4c_init_fill_user_ring();
+
+ sun4c_set_context(0);
+ memset(swapper_pg_dir, 0, PAGE_SIZE);
+ memset(pg0, 0, PAGE_SIZE);
+ memset(pg1, 0, PAGE_SIZE);
+ memset(pg2, 0, PAGE_SIZE);
+ memset(pg3, 0, PAGE_SIZE);
+
+ /* Save work later. */
+ vaddr = VMALLOC_START;
+ swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
+ vaddr += SUN4C_PGDIR_SIZE;
+ swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
+ vaddr += SUN4C_PGDIR_SIZE;
+ swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2);
+ vaddr += SUN4C_PGDIR_SIZE;
+ swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
+ sun4c_init_ss2_cache_bug();
+ sparc_context_init(num_contexts);
+
+ {
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long zholes_size[MAX_NR_ZONES];
+ unsigned long npages;
+ int znum;
+
+ for (znum = 0; znum < MAX_NR_ZONES; znum++)
+ zones_size[znum] = zholes_size[znum] = 0;
+
+ npages = max_low_pfn - pfn_base;
+
+ zones_size[ZONE_DMA] = npages;
+ zholes_size[ZONE_DMA] = npages - pages_avail;
+
+ npages = highend_pfn - max_low_pfn;
+ zones_size[ZONE_HIGHMEM] = npages;
+ zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
+
+ free_area_init_node(0, &contig_page_data, zones_size,
+ pfn_base, zholes_size);
+ }
+
+ cnt = 0;
+ for (i = 0; i < num_segmaps; i++)
+ if (mmu_entry_pool[i].locked)
+ cnt++;
+
+ max_user_taken_entries = num_segmaps - cnt - 40 - 1;
+
+ printk("SUN4C: %d mmu entries for the kernel\n", cnt);
+}
+
+/* Load up routines and constants for sun4c mmu */
+void __init ld_mmu_sun4c(void)
+{
+ extern void ___xchg32_sun4c(void);
+
+ printk("Loading sun4c MMU routines\n");
+
+ /* First the constants */
+ BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
+ BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
+ BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);
+
+ BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
+ BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
+ BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
+
+ BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
+ BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
+ page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
+ pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
+ _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
+
+ /* Functions */
+ BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
+
+ if (sun4c_vacinfo.do_hwflushes) {
+ BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM);
+ } else {
+ BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM);
+ }
+
+ BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
+
+ /* The 2.4.18 code does not set this on sun4c, how does it work? XXX */
+ /* BTFIXUPSET_SETHI(none_mask, 0x00000000); */ /* Defaults to zero? */
+
+ BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM);
+#if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */
+ BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
+#else
+ BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
+#endif
+ BTFIXUPSET_CALL(pmd_set, sun4c_pmd_set, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_populate, sun4c_pmd_populate, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
+ BTFIXUPSET_CALL(pte_read, sun4c_pte_read, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);
+
+ BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
+ BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
+ BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
+
+ BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
+ BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_offset_kernel, sun4c_pte_offset_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
+ BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
+ BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
+ BTFIXUPSET_HALF(pte_filei, _SUN4C_PAGE_FILE);
+ BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
+ BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
+ BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
+ BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(pte_to_pgoff, sun4c_pte_to_pgoff, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pgoff_to_pte, sun4c_pgoff_to_pte, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_translate_dvma, sun4c_translate_dvma, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
+
+ BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
+
+ /* These should _never_ get called with two level tables. */
+ BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0);
+}
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
new file mode 100644
index 000000000000..2dcaa5ac1a38
--- /dev/null
+++ b/arch/sparc/mm/swift.S
@@ -0,0 +1,256 @@
+/* $Id: swift.S,v 1.9 2002/01/08 11:11:59 davem Exp $
+ * swift.S: MicroSparc-II mmu/cache operations.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asm_offsets.h>
+
+ .text
+ .align 4
+
+#if 1 /* XXX screw this, I can't get the VAC flushes working
+ * XXX reliably... -DaveM
+ */
+ .globl swift_flush_cache_all, swift_flush_cache_mm
+ .globl swift_flush_cache_range, swift_flush_cache_page
+ .globl swift_flush_page_for_dma
+ .globl swift_flush_page_to_ram
+
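+/* All of the entry points below collapse to one routine: step the
+ * cache tag space down from 0x2000 in 16-byte strides, storing %g0
+ * to the data-cache tag at offset %o0 and to the instruction-cache
+ * tag at twice that offset.
+ */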
+swift_flush_cache_all:
+swift_flush_cache_mm:
+swift_flush_cache_range:
+swift_flush_cache_page:
+swift_flush_page_for_dma:
+swift_flush_page_to_ram:
+ sethi %hi(0x2000), %o0
+1: subcc %o0, 0x10, %o0
+ add %o0, %o0, %o1
+ sta %g0, [%o0] ASI_M_DATAC_TAG
+ bne 1b
+ sta %g0, [%o1] ASI_M_TXTC_TAG
+ retl
+ nop
+#else
+
+ .globl swift_flush_cache_all
+swift_flush_cache_all:
+ WINDOW_FLUSH(%g4, %g5)
+
+ /* Just clear out all the tags. */
+ sethi %hi(16 * 1024), %o0
+1: subcc %o0, 16, %o0
+ sta %g0, [%o0] ASI_M_TXTC_TAG
+ bne 1b
+ sta %g0, [%o0] ASI_M_DATAC_TAG
+ retl
+ nop
+
+ .globl swift_flush_cache_mm
+swift_flush_cache_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be swift_flush_cache_mm_out
+ WINDOW_FLUSH(%g4, %g5)
+ rd %psr, %g1
+ andn %g1, PSR_ET, %g3
+ wr %g3, 0x0, %psr
+ nop
+ nop
+ mov SRMMU_CTX_REG, %g7
+ lda [%g7] ASI_M_MMUREGS, %g5
+ sta %g2, [%g7] ASI_M_MMUREGS
+
+#if 1
+ sethi %hi(0x2000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o0] ASI_M_FLUSH_CTX
+ bne 1b
+ nop
+#else
+ clr %o0
+ or %g0, 2048, %g7
+ or %g0, 2048, %o1
+ add %o1, 2048, %o2
+ add %o2, 2048, %o3
+ mov 16, %o4
+ add %o4, 2048, %o5
+ add %o5, 2048, %g2
+ add %g2, 2048, %g3
+1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX
+ subcc %g7, 32, %g7
+ bne 1b
+ add %o0, 32, %o0
+#endif
+
+ mov SRMMU_CTX_REG, %g7
+ sta %g5, [%g7] ASI_M_MMUREGS
+ wr %g1, 0x0, %psr
+ nop
+ nop
+swift_flush_cache_mm_out:
+ retl
+ nop
+
+ .globl swift_flush_cache_range
+swift_flush_cache_range:
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+ sub %o2, %o1, %o2
+ sethi %hi(4096), %o3
+ cmp %o2, %o3
+ bgu swift_flush_cache_mm
+ nop
+ b 70f
+ nop
+
+ .globl swift_flush_cache_page
+swift_flush_cache_page:
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+70:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be swift_flush_cache_page_out
+ WINDOW_FLUSH(%g4, %g5)
+ rd %psr, %g1
+ andn %g1, PSR_ET, %g3
+ wr %g3, 0x0, %psr
+ nop
+ nop
+ mov SRMMU_CTX_REG, %g7
+ lda [%g7] ASI_M_MMUREGS, %g5
+ sta %g2, [%g7] ASI_M_MMUREGS
+
+ andn %o1, (PAGE_SIZE - 1), %o1
+#if 1
+ sethi %hi(0x1000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ bne 1b
+ nop
+#else
+ or %g0, 512, %g7
+ or %g0, 512, %o0
+ add %o0, 512, %o2
+ add %o2, 512, %o3
+ add %o3, 512, %o4
+ add %o4, 512, %o5
+ add %o5, 512, %g3
+ add %g3, 512, %g4
+1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+ subcc %g7, 16, %g7
+ bne 1b
+ add %o1, 16, %o1
+#endif
+
+ mov SRMMU_CTX_REG, %g7
+ sta %g5, [%g7] ASI_M_MMUREGS
+ wr %g1, 0x0, %psr
+ nop
+ nop
+swift_flush_cache_page_out:
+ retl
+ nop
+
+ /* Swift is write-through, but it is neither I/O- nor TLB-walk
+ * coherent, and its caches are virtually indexed and tagged.
+ */
+ .globl swift_flush_page_for_dma
+ .globl swift_flush_page_to_ram
+swift_flush_page_for_dma:
+swift_flush_page_to_ram:
+ andn %o0, (PAGE_SIZE - 1), %o1
+#if 1
+ sethi %hi(0x1000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ bne 1b
+ nop
+#else
+ or %g0, 512, %g7
+ or %g0, 512, %o0
+ add %o0, 512, %o2
+ add %o2, 512, %o3
+ add %o3, 512, %o4
+ add %o4, 512, %o5
+ add %o5, 512, %g3
+ add %g3, 512, %g4
+1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+ subcc %g7, 16, %g7
+ bne 1b
+ add %o1, 16, %o1
+#endif
+ retl
+ nop
+#endif
+
+ .globl swift_flush_sig_insns
+swift_flush_sig_insns:
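+ /* Two new instruction words were just written at %o1 (the signal
+ * trampoline); flush both so the I-cache picks up the new code.
+ */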
+ flush %o1
+ retl
+ flush %o1 + 4
+
+ .globl swift_flush_tlb_mm
+ .globl swift_flush_tlb_range
+ .globl swift_flush_tlb_all
+swift_flush_tlb_range:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+swift_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be swift_flush_tlb_all_out
+swift_flush_tlb_all:
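+ /* VA 0x400 selects SRMMU flush type 4 ("flush entire"), so this one
+ * store invalidates the whole TLB.
+ */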
+ mov 0x400, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+swift_flush_tlb_all_out:
+ retl
+ nop
+
+ .globl swift_flush_tlb_page
+swift_flush_tlb_page:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+ cmp %o3, -1
+ be swift_flush_tlb_page_out
+ nop
+#if 1
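+ /* Easy way out: a type-4 probe at VA 0x400 flushes the entire TLB
+ * rather than just this page.
+ */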
+ mov 0x400, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+#else
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sta %o3, [%g1] ASI_M_MMUREGS
+ sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ sta %g5, [%g1] ASI_M_MMUREGS
+#endif
+swift_flush_tlb_page_out:
+ retl
+ nop
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
new file mode 100644
index 000000000000..8acd1787fde2
--- /dev/null
+++ b/arch/sparc/mm/tsunami.S
@@ -0,0 +1,133 @@
+/* $Id: tsunami.S,v 1.7 2001/12/21 04:56:15 davem Exp $
+ * tsunami.S: High speed MicroSparc-I mmu/cache operations.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/asm_offsets.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+
+ .text
+ .align 4
+
+ .globl tsunami_flush_cache_all, tsunami_flush_cache_mm
+ .globl tsunami_flush_cache_range, tsunami_flush_cache_page
+ .globl tsunami_flush_page_to_ram, tsunami_flush_page_for_dma
+ .globl tsunami_flush_sig_insns
+ .globl tsunami_flush_tlb_all, tsunami_flush_tlb_mm
+ .globl tsunami_flush_tlb_range, tsunami_flush_tlb_page
+
+ /* Sliiick... */
+tsunami_flush_cache_page:
+tsunami_flush_cache_range:
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+tsunami_flush_cache_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be tsunami_flush_cache_out
+tsunami_flush_cache_all:
+ WINDOW_FLUSH(%g4, %g5)
+tsunami_flush_page_for_dma:
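+ /* MicroSparc-I can flash-clear its whole I-cache and D-cache with a
+ * single store to each of these ASIs.
+ */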
+ sta %g0, [%g0] ASI_M_IC_FLCLEAR
+ sta %g0, [%g0] ASI_M_DC_FLCLEAR
+tsunami_flush_cache_out:
+tsunami_flush_page_to_ram:
+ retl
+ nop
+
+tsunami_flush_sig_insns:
+ flush %o1
+ retl
+ flush %o1 + 4
+
+ /* More slick stuff... */
+tsunami_flush_tlb_range:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+tsunami_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be tsunami_flush_tlb_out
+tsunami_flush_tlb_all:
+ mov 0x400, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ nop
+ nop
+ nop
+ nop
+ nop
+tsunami_flush_tlb_out:
+ retl
+ nop
+
+ /* This one can be done in a fine grained manner... */
+tsunami_flush_tlb_page:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+ cmp %o3, -1
+ be tsunami_flush_tlb_page_out
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sta %o3, [%g1] ASI_M_MMUREGS
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ nop
+ nop
+ nop
+ nop
+ nop
+tsunami_flush_tlb_page_out:
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
+ ldd [src + offset + 0x18], t0; \
+ std t0, [dst + offset + 0x18]; \
+ ldd [src + offset + 0x10], t2; \
+ std t2, [dst + offset + 0x10]; \
+ ldd [src + offset + 0x08], t0; \
+ std t0, [dst + offset + 0x08]; \
+ ldd [src + offset + 0x00], t2; \
+ std t2, [dst + offset + 0x00];
+
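+ /* Copy one page: each MIRROR_BLOCK above moves 32 bytes with four
+ * ldd/std pairs, eight expansions cover 256 bytes per loop pass, and
+ * the loop runs PAGE_SIZE >> 8 times.
+ */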
+ .globl tsunami_copy_1page
+tsunami_copy_1page:
+/* NOTE: This routine has to be shorter than 70 instructions --jj */
+ or %g0, (PAGE_SIZE >> 8), %g1
+1:
+ MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
+ subcc %g1, 1, %g1
+ add %o0, 0x100, %o0
+ bne 1b
+ add %o1, 0x100, %o1
+
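+ /* Boot-time patch: copy the tsunami_copy_1page body word by word over
+ * __copy_1page (stopping when the copy pointer reaches
+ * tsunami_setup_blockops), then flash-clear the caches so the patched
+ * code is fetched fresh. Presumably this is why the routine above
+ * must stay under 70 instructions.
+ */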
+ .globl tsunami_setup_blockops
+tsunami_setup_blockops:
+ sethi %hi(__copy_1page), %o0
+ or %o0, %lo(__copy_1page), %o0
+ sethi %hi(tsunami_copy_1page), %o1
+ or %o1, %lo(tsunami_copy_1page), %o1
+ sethi %hi(tsunami_setup_blockops), %o2
+ or %o2, %lo(tsunami_setup_blockops), %o2
+ ld [%o1], %o4
+1: add %o1, 4, %o1
+ st %o4, [%o0]
+ add %o0, 4, %o0
+ cmp %o1, %o2
+ bne 1b
+ ld [%o1], %o4
+ sta %g0, [%g0] ASI_M_IC_FLCLEAR
+ sta %g0, [%g0] ASI_M_DC_FLCLEAR
+ retl
+ nop
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
new file mode 100644
index 000000000000..f58712d26bf5
--- /dev/null
+++ b/arch/sparc/mm/viking.S
@@ -0,0 +1,284 @@
+/* $Id: viking.S,v 1.19 2001/12/21 04:56:15 davem Exp $
+ * viking.S: High speed Viking cache/mmu operations
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asm_offsets.h>
+#include <asm/asi.h>
+#include <asm/mxcc.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/viking.h>
+#include <asm/btfixup.h>
+
+#ifdef CONFIG_SMP
+ .data
+ .align 4
+sun4dsmp_flush_tlb_spin:
+ .word 0
+#endif
+
+ .text
+ .align 4
+
+ .globl viking_flush_cache_all, viking_flush_cache_mm
+ .globl viking_flush_cache_range, viking_flush_cache_page
+ .globl viking_flush_page, viking_mxcc_flush_page
+ .globl viking_flush_page_for_dma, viking_flush_page_to_ram
+ .globl viking_flush_sig_insns
+ .globl viking_flush_tlb_all, viking_flush_tlb_mm
+ .globl viking_flush_tlb_range, viking_flush_tlb_page
+
+viking_flush_page:
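+ /* Walk all 128 sets and 4 blocks of the D-cache tags; whenever a
+ * valid tag matches this page's physical tag, displace that line by
+ * loading through kernel addresses that index the same set.
+ */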
+ sethi %hi(PAGE_OFFSET), %g2
+ sub %o0, %g2, %g3
+ srl %g3, 12, %g1 ! ppage >> 12
+
+ clr %o1 ! set counter, 0 - 127
+ sethi %hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3
+ sethi %hi(0x80000000), %o4
+ sethi %hi(VIKING_PTAG_VALID), %o5
+ sethi %hi(2*PAGE_SIZE), %o0
+ sethi %hi(PAGE_SIZE), %g7
+ clr %o2 ! block counter, 0 - 3
+5:
+ sll %o1, 5, %g4
+ or %g4, %o4, %g4 ! 0x80000000 | (set << 5)
+
+ sll %o2, 26, %g5 ! block << 26
+6:
+ or %g5, %g4, %g5
+ ldda [%g5] ASI_M_DATAC_TAG, %g2
+ cmp %g3, %g1 ! ptag == ppage?
+ bne 7f
+ inc %o2
+
+ andcc %g2, %o5, %g0 ! ptag VALID?
+ be 7f
+ add %g4, %o3, %g2 ! (PAGE_OFFSET + PAGE_SIZE) | (set << 5)
+ ld [%g2], %g3
+ ld [%g2 + %g7], %g3
+ add %g2, %o0, %g2
+ ld [%g2], %g3
+ ld [%g2 + %g7], %g3
+ add %g2, %o0, %g2
+ ld [%g2], %g3
+ ld [%g2 + %g7], %g3
+ add %g2, %o0, %g2
+ ld [%g2], %g3
+ b 8f
+ ld [%g2 + %g7], %g3
+
+7:
+ cmp %o2, 3
+ ble 6b
+ sll %o2, 26, %g5 ! block << 26
+
+8: inc %o1
+ cmp %o1, 0x7f
+ ble 5b
+ clr %o2
+
+9: retl
+ nop
+
+viking_mxcc_flush_page:
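+ /* Push the page out via the MXCC stream hardware: writing a chunk's
+ * physical address (cacheable bit set in the upper word) to the source
+ * and then the destination stream register makes the MXCC copy each
+ * MXCC_STREAM_SIZE chunk onto itself, in effect flushing it to memory.
+ */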
+ sethi %hi(PAGE_OFFSET), %g2
+ sub %o0, %g2, %g3
+ sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE
+ sethi %hi(MXCC_SRCSTREAM), %o3 ! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESSTREAM)
+ mov 0x10, %g2 ! set cacheable bit
+ or %o3, %lo(MXCC_SRCSTREAM), %o2
+ or %o3, %lo(MXCC_DESSTREAM), %o3
+ sub %g3, MXCC_STREAM_SIZE, %g3
+6:
+ stda %g2, [%o2] ASI_M_MXCC
+ stda %g2, [%o3] ASI_M_MXCC
+ andncc %g3, PAGE_MASK, %g0
+ bne 6b
+ sub %g3, MXCC_STREAM_SIZE, %g3
+
+9: retl
+ nop
+
+viking_flush_cache_page:
+viking_flush_cache_range:
+#ifndef CONFIG_SMP
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+#endif
+viking_flush_cache_mm:
+#ifndef CONFIG_SMP
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+ bne viking_flush_cache_all
+ nop
+ b,a viking_flush_cache_out
+#endif
+viking_flush_cache_all:
+ WINDOW_FLUSH(%g4, %g5)
+viking_flush_cache_out:
+ retl
+ nop
+
+viking_flush_tlb_all:
+ mov 0x400, %g1
+ retl
+ sta %g0, [%g1] ASI_M_FLUSH_PROBE
+
+viking_flush_tlb_mm:
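+ /* Temporarily switch the MMU to this mm's context, issue a type-3
+ * ("flush context") probe at VA 0x300, then restore the old context
+ * register.
+ */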
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o1
+ lda [%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+ cmp %o1, -1
+ be 1f
+#endif
+ mov 0x300, %g2
+ sta %o1, [%g1] ASI_M_MMUREGS
+ sta %g0, [%g2] ASI_M_FLUSH_PROBE
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+#ifndef CONFIG_SMP
+1: retl
+ nop
+#endif
+
+viking_flush_tlb_range:
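+ /* Flush the range one pgdir-sized region at a time: VA | 0x200 is a
+ * type-2 ("flush region") probe, issued under the mm's context.
+ */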
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+ cmp %o3, -1
+ be 2f
+#endif
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sta %o3, [%g1] ASI_M_MMUREGS
+ and %o1, %o4, %o1
+ add %o1, 0x200, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+1: sub %o1, %o4, %o1
+ cmp %o1, %o2
+ blu,a 1b
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+#ifndef CONFIG_SMP
+2: retl
+ nop
+#endif
+
+viking_flush_tlb_page:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+ cmp %o3, -1
+ be 1f
+#endif
+ and %o1, PAGE_MASK, %o1
+ sta %o3, [%g1] ASI_M_MMUREGS
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ retl
+ sta %g5, [%g1] ASI_M_MMUREGS
+#ifndef CONFIG_SMP
+1: retl
+ nop
+#endif
+
+viking_flush_page_to_ram:
+viking_flush_page_for_dma:
+viking_flush_sig_insns:
+ retl
+ nop
+
+#ifdef CONFIG_SMP
+ .globl sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm
+ .globl sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page
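+ /* The sun4d SMP variants serialize every TLB flush on one byte lock:
+ * ldstub grabs sun4dsmp_flush_tlb_spin, the numbered loops below spin
+ * on plain loads while it is held, and the stb in each retl delay slot
+ * releases it.
+ */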
+sun4dsmp_flush_tlb_all:
+ sethi %hi(sun4dsmp_flush_tlb_spin), %g3
+1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ tst %g5
+ bne 2f
+ mov 0x400, %g1
+ sta %g0, [%g1] ASI_M_FLUSH_PROBE
+ retl
+ stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2: tst %g5
+ bne,a 2b
+ ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ b,a 1b
+
+sun4dsmp_flush_tlb_mm:
+ sethi %hi(sun4dsmp_flush_tlb_spin), %g3
+1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ tst %g5
+ bne 2f
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o1
+ lda [%g1] ASI_M_MMUREGS, %g5
+ mov 0x300, %g2
+ sta %o1, [%g1] ASI_M_MMUREGS
+ sta %g0, [%g2] ASI_M_FLUSH_PROBE
+ sta %g5, [%g1] ASI_M_MMUREGS
+ retl
+ stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2: tst %g5
+ bne,a 2b
+ ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ b,a 1b
+
+sun4dsmp_flush_tlb_range:
+ sethi %hi(sun4dsmp_flush_tlb_spin), %g3
+1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ tst %g5
+ bne 3f
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sta %o3, [%g1] ASI_M_MMUREGS
+ and %o1, %o4, %o1
+ add %o1, 0x200, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+2: sub %o1, %o4, %o1
+ cmp %o1, %o2
+ blu,a 2b
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ sta %g5, [%g1] ASI_M_MMUREGS
+ retl
+ stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+3: tst %g5
+ bne,a 3b
+ ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ b,a 1b
+
+sun4dsmp_flush_tlb_page:
+ sethi %hi(sun4dsmp_flush_tlb_spin), %g3
+1: ldstub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ tst %g5
+ bne 2f
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ and %o1, PAGE_MASK, %o1
+ sta %o3, [%g1] ASI_M_MMUREGS
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ sta %g5, [%g1] ASI_M_MMUREGS
+ retl
+ stb %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2: tst %g5
+ bne,a 2b
+ ldub [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+ b,a 1b
+ nop
+#endif