author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-m32r/smp.h
download   linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.bz2

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'include/asm-m32r/smp.h')
-rw-r--r--  include/asm-m32r/smp.h  118
1 file changed, 118 insertions(+), 0 deletions(-)
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
new file mode 100644
index 000000000000..8cd4d0da4be1
--- /dev/null
+++ b/include/asm-m32r/smp.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_M32R_SMP_H
+#define _ASM_M32R_SMP_H
+
+/* $Id$ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+#ifndef __ASSEMBLY__
+
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <asm/m32r.h>
+
+#define PHYSID_ARRAY_SIZE 1
+
+struct physid_mask
+{
+ unsigned long mask[PHYSID_ARRAY_SIZE];
+};
+
+typedef struct physid_mask physid_mask_t;
+
+#define physid_set(physid, map) set_bit(physid, (map).mask)
+#define physid_clear(physid, map) clear_bit(physid, (map).mask)
+#define physid_isset(physid, map) test_bit(physid, (map).mask)
+#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
+
+#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
+#define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS)
+#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
+#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
+#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
+#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
+#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
+#define physids_coerce(map) ((map).mask[0])
+
+#define physids_promote(physids) \
+ ({ \
+ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
+ __physid_mask.mask[0] = physids; \
+ __physid_mask; \
+ })
+
+#define physid_mask_of_physid(physid) \
+ ({ \
+ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
+ physid_set(physid, __physid_mask); \
+ __physid_mask; \
+ })
+
+#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
+#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
+
+extern physid_mask_t phys_cpu_present_map;
+
+/*
+ * Some lowlevel functions might want to know about
+ * the real CPU ID <-> CPU # mapping.
+ */
+extern volatile int physid_2_cpu[NR_CPUS];
+extern volatile int cpu_2_physid[NR_CPUS];
+#define physid_to_cpu(physid) physid_2_cpu[physid]
+#define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]
+
+#define smp_processor_id() (current_thread_info()->cpu)
+
+extern cpumask_t cpu_callout_map;
+#define cpu_possible_map cpu_callout_map
+
+static __inline__ int hard_smp_processor_id(void)
+{
+ return (int)*(volatile long *)M32R_CPUID_PORTL;
+}
+
+static __inline__ int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+
+static __inline__ int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+static __inline__ unsigned int num_booting_cpus(void)
+{
+ return cpus_weight(cpu_callout_map);
+}
+
+extern void smp_send_timer(void);
+extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+
+#endif /* not __ASSEMBLY__ */
+
+#define NO_PROC_ID (0xff) /* No processor magic marker */
+
+#define PROC_CHANGE_PENALTY (15) /* Schedule penalty */
+
+/*
+ * M32R-mp IPI
+ */
+#define RESCHEDULE_IPI (M32R_IRQ_IPI0-M32R_IRQ_IPI0)
+#define INVALIDATE_TLB_IPI (M32R_IRQ_IPI1-M32R_IRQ_IPI0)
+#define CALL_FUNCTION_IPI (M32R_IRQ_IPI2-M32R_IRQ_IPI0)
+#define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
+#define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
+#define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+
+#define IPI_SHIFT (0)
+#define NR_IPIS (8)
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_M32R_SMP_H */
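
For readers skimming the new header, here is a short, hypothetical usage sketch (not part of the commit above; the helper name register_booting_cpu is invented for illustration) showing how the physid helpers and the physid_2_cpu/cpu_2_physid tables declared in this file might be used by m32r SMP boot code:

/*
 * Illustration only -- hypothetical caller, not from this commit.
 */
#include <asm/smp.h>    /* this header, as seen from m32r builds */

static void register_booting_cpu(int logical_cpu)
{
        /* Read the hardware CPU ID from the CPUID port. */
        int physid = hard_smp_processor_id();

        /* Mark this physical ID as present in the system map. */
        physid_set(physid, phys_cpu_present_map);

        /* Record the physical <-> logical mapping in both directions. */
        physid_2_cpu[physid] = logical_cpu;
        cpu_2_physid[logical_cpu] = physid;
}

Keeping both arrays up to date is presumably what lets physid_to_cpu() and cpu_to_physid() resolve either direction with a single array lookup.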