author    Linus Torvalds <torvalds@linux-foundation.org>    2008-04-18 09:44:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-04-18 09:44:55 -0700
commit    4786b4ee22de6304e841b12ee22b849230d7fba3 (patch)
tree      08793b8fbcd63204d5d3355ac755745adcfef170 /include
parent    253ba4e79edc695b2925bd2ef34de06ff4d4070c (diff)
parent    71b264f85ff50c14fe945ffff06ae0d5e9a9124e (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
  [IA64] kdump: Add crash_save_vmcoreinfo for INIT
  [IA64] Fix NUMA configuration issue
  [IA64] Itanium Spec updates
  [IA64] Untangle sync_icache_dcache() page size determination
  [IA64] arch/ia64/kernel/: use time_* macros
  [IA64] remove redundant display of free swap space in show_mem()
  [IA64] make IOMMU respect the segment boundary limits
  [IA64] kprobes: kprobe-booster for ia64
  [IA64] fix getpid and set_tid_address fast system calls for pid namespaces
  [IA64] Replace explicit jiffies tests with time_* macros.
  [IA64] use goto to jump out do/while_each_thread
  [IA64] Fix unlock ordering in smp_callin
  [IA64] pgd_offset() constfication.
  [IA64] kdump: crash.c coding style fix
  [IA64] kdump: add kdump_on_fatal_mca
  [IA64] Minimize per_cpu reservations.
  [IA64] Correct pernodesize calculation.
  [IA64] Kernel parameter for max number of concurrent global TLB purges
  [IA64] Multiple outstanding ptc.g instruction support
  [IA64] Implement smp_call_function_mask for ia64
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/acpi.h           33
-rw-r--r--  include/asm-ia64/cputime.h       104
-rw-r--r--  include/asm-ia64/elf.h            31
-rw-r--r--  include/asm-ia64/kprobes.h         7
-rw-r--r--  include/asm-ia64/kregs.h           3
-rw-r--r--  include/asm-ia64/meminit.h         3
-rw-r--r--  include/asm-ia64/numa.h            2
-rw-r--r--  include/asm-ia64/pal.h            72
-rw-r--r--  include/asm-ia64/pgtable.h         2
-rw-r--r--  include/asm-ia64/sal.h            21
-rw-r--r--  include/asm-ia64/smp.h             3
-rw-r--r--  include/asm-ia64/system.h         12
-rw-r--r--  include/asm-ia64/thread_info.h    14
-rw-r--r--  include/asm-ia64/tlb.h            26
-rw-r--r--  include/asm-ia64/tlbflush.h        1

15 files changed, 316 insertions(+), 18 deletions(-)
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39b5599..fcfad326f4c7 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/system.h>
+#include <asm/numa.h>
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#define acpi_unlazy_tlb(x)
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu) \
+ for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+ int low_cpu, high_cpu;
+ int cpu;
+ int next_nid = 0;
+
+ low_cpu = cpus_weight(early_cpu_possible_map);
+
+ high_cpu = max(low_cpu, min_cpus);
+ high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+ for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+ cpu_set(cpu, early_cpu_possible_map);
+ if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+ node_cpuid[cpu].nid = next_nid;
+ next_nid++;
+ if (next_nid >= num_online_nodes())
+ next_nid = 0;
+ }
+ }
+}
+#endif /* CONFIG_ACPI_NUMA */
+
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
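
Note: the per_cpu_scan_finalize() helper added above sizes the possible-CPU map and hands the extra hot-pluggable CPU slots out round-robin across the online nodes. Below is a minimal standalone sketch of that assignment logic (not part of the commit); the node count, CPU counts and NR_CPUS value are invented purely for illustration.

#include <stdio.h>

#define NR_CPUS       16
#define NUMA_NO_NODE  (-1)

int main(void)
{
	int nid[NR_CPUS];
	int online_nodes = 2;      /* assumed number of online NUMA nodes */
	int low_cpu = 4;           /* CPUs actually listed in the firmware tables */
	int min_cpus = 8, reserve_cpus = 2;
	int high_cpu = (low_cpu > min_cpus ? low_cpu : min_cpus) + reserve_cpus;
	int cpu, next_nid = 0;

	if (high_cpu > NR_CPUS)
		high_cpu = NR_CPUS;

	/* CPUs found at boot already have a node; the rest start unassigned */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		nid[cpu] = (cpu < low_cpu) ? cpu % online_nodes : NUMA_NO_NODE;

	/* same loop shape as per_cpu_scan_finalize(): spread missing nodes evenly */
	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		if (nid[cpu] == NUMA_NO_NODE) {
			nid[cpu] = next_nid++;
			if (next_nid >= online_nodes)
				next_nid = 0;
		}
		printf("cpu %2d -> node %d\n", cpu, nid[cpu]);
	}
	return 0;
}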
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
index 72400a78002a..f9abdec6577a 100644
--- a/include/asm-ia64/cputime.h
+++ b/include/asm-ia64/cputime.h
@@ -1,6 +1,110 @@
+/*
+ * include/asm-ia64/cputime.h:
+ * Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
+#else
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <asm/processor.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_sub(__a, __b) ((__a) - (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+ cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+ return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+ val->tv_sec = ct / NSEC_PER_SEC;
+ val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+ cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+ return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+ val->tv_sec = ct / NSEC_PER_SEC;
+ val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct)
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
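
Note: with CONFIG_VIRT_CPU_ACCOUNTING, cputime_t becomes a raw nanosecond count and the conversions above reduce to plain arithmetic. The following standalone sketch (not part of the commit) restates a few of those macros with an assumed HZ value, just to make the unit conversions concrete.

#include <stdio.h>

typedef unsigned long long cputime_t;    /* u64 in the kernel */

#define NSEC_PER_SEC   1000000000ULL
#define NSEC_PER_MSEC  1000000ULL
#define HZ             250ULL            /* assumed tick rate, for illustration only */

#define cputime_to_jiffies(ct)   ((ct) / (NSEC_PER_SEC / HZ))
#define jiffies_to_cputime(jif)  ((jif) * (NSEC_PER_SEC / HZ))
#define cputime_to_msecs(ct)     ((ct) / NSEC_PER_MSEC)

int main(void)
{
	cputime_t ct = 1500000000ULL;    /* 1.5 s of cpu time, in nsec */

	/* 1.5 s -> 375 jiffies at HZ=250, and 1500 ms */
	printf("jiffies: %llu\n", cputime_to_jiffies(ct));
	printf("msecs:   %llu\n", cputime_to_msecs(ct));
	printf("back:    %llu nsec\n", jiffies_to_cputime(cputime_to_jiffies(ct)));
	return 0;
}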
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index f8e83eca67a2..5e0c1a6bce8d 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -26,6 +26,7 @@
#define ELF_ARCH EM_IA_64
#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
interpreted as follows by Linux: */
@@ -154,6 +155,30 @@ extern void ia64_init_addr_space (void);
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET 0
+#define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t))
+
typedef unsigned long elf_fpxregset_t;
typedef unsigned long elf_greg_t;
@@ -183,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
struct task_struct;
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index d03bf9ff68e3..ef71b57fc2f4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -30,8 +30,12 @@
#include <asm/break.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE 1
+#define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */
#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST (long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \
+ (0x1L << 12) | /* many */ \
+ (((i1) & 1) << 36) | ((i2) << 13))) /* imm */
typedef union cmp_inst {
struct {
@@ -112,6 +116,7 @@ struct arch_specific_insn {
#define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
#define INST_FLAG_FIX_BRANCH_REG 2
#define INST_FLAG_BREAK_INST 4
+ #define INST_FLAG_BOOSTABLE 8
unsigned long inst_flag;
unsigned short target_br_reg;
unsigned short slot;
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 7e55a584975c..aefcdfee7f23 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -31,6 +31,9 @@
#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
+#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/
+#define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/
+
/* Processor status register bits: */
#define IA64_PSR_BE_BIT 1
#define IA64_PSR_UP_BIT 2
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f54b61..7245a5781594 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@ extern void find_memory (void);
extern void reserve_memory (void);
extern void find_initrd (void);
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27cfae3e..3499ff57bf42 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
#include <asm/mmzone.h>
+#define NUMA_NO_NODE -1
+
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 8a695d3407d2..67b02901ead4 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -13,6 +13,7 @@
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
*
* 99/10/01 davidm Make sure we pass zero for reserved parameters.
* 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
+#define PAL_VP_INFO 50 /* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */
#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s {
wiv : 1, /* Way field valid */
reserved2 : 1,
dp : 1, /* Data poisoned on MBE */
- reserved3 : 8,
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
index : 20, /* Cache line index */
reserved4 : 2,
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s {
dtc : 1, /* Fail in data TC */
itc : 1, /* Fail in inst. TC */
op : 4, /* Cache operation */
- reserved3 : 30,
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
+ reserved4 : 22,
is : 1, /* instruction set (1 == ia32) */
iv : 1, /* instruction set field valid */
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s {
way : 6, /* Way of structure */
wv : 1, /* way valid */
xv : 1, /* index valid */
- reserved1 : 8,
+ reserved1 : 6,
+ hlth : 2, /* Health indicator */
index : 8, /* Index or set of the uarch
* structure that failed.
*/
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void)
/* Return the machine check dynamic processor state */
static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+ PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
if (size)
*size = iprv.v0;
- if (pds)
- *pds = iprv.v1;
return iprv.status;
}
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
return iprv.status;
}
+typedef union pal_hw_tracking_u {
+ u64 pht_data;
+ struct {
+ u64 itc :4, /* Instruction cache tracking */
+ dct :4, /* Data cache tracking */
+ itt :4, /* Instruction TLB tracking */
+ ddt :4, /* Data TLB tracking */
+ reserved:48;
+ } pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+ if (status)
+ *status = iprv.v0;
+ return iprv.status;
+}
+
/* Register a platform dependent location with PAL to which it can save
* minimal processor state in the event of a machine check or initialization
* event.
*/
static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
{
struct ia64_pal_retval iprv;
- PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+ PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+ if (req_size)
+ *req_size = iprv.v0;
return iprv.status;
}
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
return iprv.status;
}
+typedef union pal_vp_info_u {
+ u64 pvi_val;
+ struct {
+ u64 index: 48, /* virtual feature set info */
+ vmm_id: 16; /* feature set id */
+ } pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+ if (vp_info)
+ *vp_info = iprv.v0;
+ if (vmm_id)
+ *vmm_id = iprv.v1;
+ return iprv.status;
+}
+
typedef union pal_itr_valid_u {
u64 piv_val;
struct {
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e6204f14f614..ed70862ea247 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -371,7 +371,7 @@ pgd_index (unsigned long address)
/* The offset in the 1-level directory is given by the 3 region bits
(61..63) and the level-1 bits. */
static inline pgd_t*
-pgd_offset (struct mm_struct *mm, unsigned long address)
+pgd_offset (const struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
}
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index f4904db3b057..89594b442f83 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -296,6 +296,9 @@ enum {
EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SAL_PLAT_BUS_ERR_SECT_GUID \
EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+ EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+ 0xca, 0x4d)
#define MAX_CACHE_ERRORS 6
#define MAX_TLB_ERRORS 6
@@ -879,6 +882,24 @@ extern void ia64_jump_to_sal(struct sal_to_os_boot *);
extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+#define PALO_MAX_TLB_PURGES 0xFFFF
+#define PALO_SIG "PALO"
+
+struct palo_table {
+ u8 signature[4]; /* Should be "PALO" */
+ u32 length;
+ u8 minor_revision;
+ u8 major_revision;
+ u8 checksum;
+ u8 reserved1[5];
+ u16 max_tlb_purges;
+ u8 reserved2[6];
+};
+
+#define NPTCG_FROM_PAL 0
+#define NPTCG_FROM_PALO 1
+#define NPTCG_FROM_KERNEL_PARAMETER 2
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_SAL_H */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417a..ec5f355fb7e3 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
return lid.f.id << 8 | lid.f.eid;
}
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait);
+
#define hard_smp_processor_id() ia64_get_lid()
#ifdef CONFIG_SMP
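
Note: smp_call_function_mask() is the entry point this merge adds for running a function on an arbitrary set of CPUs. A hedged usage sketch follows (kernel build context and CONFIG_SMP assumed, so it is not standalone-runnable); the handler and wrapper names are made up for illustration and are not from the commit.

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Runs on each CPU selected by the mask. */
static void flush_local_counters(void *info)
{
	/* per-CPU work goes here */
}

static void flush_counters_on(cpumask_t mask)
{
	/* wait != 0: do not return until every targeted CPU has run the handler */
	smp_call_function_mask(mask, flush_local_counters, NULL, 1);
}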
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 595112bca3cc..dff8128fa58e 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -210,6 +210,13 @@ struct task_struct;
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -222,6 +229,7 @@ extern void ia64_load_extra (struct task_struct *task);
|| IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
+ IA64_ACCOUNT_ON_SWITCH(prev, next); \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \
@@ -266,6 +274,10 @@ void cpu_idle_wait(void);
void default_idle(void);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 93d83cbe0c8c..6da8069a0f77 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -31,6 +31,12 @@ struct thread_info {
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
struct restart_block restart_block;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ __u64 ac_stamp;
+ __u64 ac_leave;
+ __u64 ac_stime;
+ __u64 ac_utime;
+#endif
};
#define THREAD_SIZE KERNEL_STACK_SIZE
@@ -62,9 +68,17 @@ struct thread_info {
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->ac_stime = 0; \
+ task_thread_info(p)->ac_utime = 0; \
+ task_thread_info(p)->task = (p);
+#else
#define setup_thread_stack(p, org) \
*task_thread_info(p) = *task_thread_info(org); \
task_thread_info(p)->task = (p);
+#endif
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 26edcb750f9f..20d8a39680c2 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -64,6 +64,32 @@ struct mmu_gather {
struct page *pages[FREE_PTE_NR];
};
+struct ia64_tr_entry {
+ u64 ifa;
+ u64 itir;
+ u64 pte;
+ u64 rr;
+}; /*Record for tr entry!*/
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+
+/*
+ region register macros
+*/
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
+
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
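
Note: the RR_* macros above pick apart an ia64 region register (ve in bit 0, ps, the log2 preferred page size, in bits 2..7, rid in bits 8..31). The standalone sketch below (not part of the commit) restates the accessors and decodes a sample value; the rid and page-size values are arbitrary.

#include <stdio.h>

typedef unsigned long long u64;

#define RR_VE(val)      (((u64)(val) & 0x1ULL) << 0)
#define RR_TO_VE(val)   (((u64)(val) >> 0) & 0x1ULL)
#define RR_PS(val)      (((u64)(val) & 0x3fULL) << 2)
#define RR_TO_PS(val)   (((u64)(val) >> 2) & 0x3fULL)
#define RR_TO_RID(val)  (((u64)(val) >> 8) & 0xffffffULL)

int main(void)
{
	u64 rid = 0x1234;                                 /* arbitrary region id */
	u64 rr  = (rid << 8) | RR_PS(16) | RR_VE(1);      /* 64 KB pages, VHPT walker on */

	printf("rid       = 0x%llx\n", RR_TO_RID(rr));              /* 0x1234 */
	printf("page size = %llu bytes\n", 1ULL << RR_TO_PS(rr));   /* 65536 */
	printf("ve        = %llu\n", RR_TO_VE(rr));                 /* 1 */
	return 0;
}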
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index 7774a1cac0cc..3be25dfed164 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -17,6 +17,7 @@
* Now for some TLB flushing routines. This is the kind of stuff that
* can be very expensive, so try to avoid them whenever possible.
*/
+extern void setup_ptcg_sem(int max_purges, int from_palo);
/*
* Flush everything (kernel mapping may also have changed due to