From 472118e63de7938c89f0f4fd3e0a80986e4c600f Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Thu, 10 May 2007 09:44:42 -0700
Subject: [IA64] Wire up epoll_pwait and utimensat

Another day, another pair of new system calls.

Signed-off-by: Tony Luck
---
 include/asm-ia64/unistd.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 861c8ec87b09..f049bc40ca7d 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -294,11 +294,13 @@
#define __NR_vmsplice 1302
/* 1303 reserved for move_pages */
#define __NR_getcpu 1304
+#define __NR_epoll_pwait 1305
+#define __NR_utimensat 1306
#ifdef __KERNEL__
-#define NR_syscalls 281 /* length of syscall table */
+#define NR_syscalls 283 /* length of syscall table */
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-- cgit v1.2.3

From 25d61578daae697c4a0eb817f42a868af9824f82 Mon Sep 17 00:00:00 2001
From: John Keller
Date: Thu, 10 May 2007 22:42:44 -0700
Subject: [IA64] SN: validate smp_affinity mask on intr redirect

On SN, only allow one bit to be set in the smp_affinity mask when
redirecting an interrupt. Currently setting multiple bits is allowed,
but only the first bit is used in determining the CPU to redirect to.
This has caused confusion among some customers.

[akpm@linux-foundation.org: fixes]
Signed-off-by: John Keller
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/irq.c | 11 +++++++++++
 include/asm-ia64/irq.h | 6 ++++++
 kernel/irq/proc.c | 7 +++++++
 3 files changed, 24 insertions(+)

(limited to 'include')

diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ce49c85c928f..b4c239685d2e 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -104,6 +104,17 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
irq_redir[irq] = (char) (redir & 0xff);
}
}
+
+bool is_affinity_mask_valid(cpumask_t cpumask)
+{
+ if (ia64_platform_is("sn2")) {
+ /* Only allow one CPU to be specified in the smp_affinity mask */
+ if (cpus_weight(cpumask) != 1)
+ return false;
+ }
+ return true;
+}
+
#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index 79479e2c6966..67221615e317 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -11,6 +11,9 @@
 * 02/29/00 D.Mosberger moved most things into hw_irq.h
 */
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
#define NR_IRQS 256
#define NR_IRQ_VECTORS NR_IRQS
@@ -29,5 +32,8 @@ extern void disable_irq (unsigned int);
extern void disable_irq_nosync (unsigned int);
extern void enable_irq (unsigned int);
extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+bool is_affinity_mask_valid(cpumask_t cpumask);
+
+#define is_affinity_mask_valid is_affinity_mask_valid
#endif /* _ASM_IA64_IRQ_H */
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index ddde0ef9ccdc..b4f1674fca79 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -27,6 +27,10 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
return len;
}
+#ifndef is_affinity_mask_valid
+#define is_affinity_mask_valid(val) 1
+#endif
+
int no_irq_affinity;
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
unsigned long count, void *data)
@@ -42,6 +46,9 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
if (err)
return err;
+ if (!is_affinity_mask_valid(new_value))
+ return -EINVAL;
+
/*
 * Do not allow disabling IRQs completely - it's a too easy
 * way to make the system unusable accidentally :-) At least
-- cgit v1.2.3

From cdc7dbdfe6edac177acb32e4ca56b525d0743fe7 Mon Sep 17 00:00:00 2001
From: Anil S Keshavamurthy
Date: Fri, 11 May 2007 09:38:40 -0700
Subject: [IA64] fix Kprobes reentrancy

In the case of reentrancy, i.e. when a probe handler calls a function
which in turn has a probe, we save the previous kprobe information and
just single-step the reentrant probe without calling the actual probe
handler. During this reentrancy period, if an interrupt occurs and a
probe happens to trigger in the interrupt path, we were corrupting the
previous kprobe information (by overwriting it), thereby crashing the
system. This patch fixes the issue by keeping an array of previous
kprobe info structs (with an array size of 2). A similar technique is
not needed on i386 and x86_64 because interrupts are turned off by
default in the break/int3 exception handler.

Signed-off-by: Anil S Keshavamurthy
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/kprobes.c | 12 ++++++++----
 include/asm-ia64/kprobes.h | 4 +++-
 2 files changed, 11 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 4f5fd0960ba7..72e593e94053 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -370,14 +370,18 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
+ unsigned int i;
+ i = atomic_add_return(1, &kcb->prev_kprobe_index);
+ kcb->prev_kprobe[i-1].kp = kprobe_running();
+ kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
- kcb->kprobe_status = kcb->prev_kprobe.status;
+ unsigned int i;
+ i = atomic_sub_return(1, &kcb->prev_kprobe_index);
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i].kp;
+ kcb->kprobe_status = kcb->prev_kprobe[i].status;
}
static void __kprobes set_current_kprobe(struct kprobe *p,
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 828ae00e47c1..2abc98b336f3 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -71,13 +71,15 @@ struct prev_kprobe {
#define MAX_PARAM_RSE_SIZE (0x60+0x60/0x3f)
/* per-cpu kprobe control block */
+#define ARCH_PREV_KPROBE_SZ 2
struct kprobe_ctlblk {
unsigned long kprobe_status;
struct pt_regs jprobe_saved_regs;
unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
unsigned long *bsp;
unsigned long cfm;
- struct prev_kprobe prev_kprobe;
+ atomic_t prev_kprobe_index;
+ struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
};
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
-- cgit v1.2.3

From 2bd62a40f63bd628c43a2f3637b252d0967659b0 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 10 May 2007 22:42:53 -0700
Subject: [IA64] Quicklist support for IA64

IA64 is the origin of the quicklist implementation. So cut out the
pieces that are now in core code and modify the functions called.
Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 4 +++ arch/ia64/mm/contig.c | 2 +- arch/ia64/mm/discontig.c | 2 +- arch/ia64/mm/init.c | 51 ---------------------------- include/asm-ia64/pgalloc.h | 82 +++++++++------------------------------------- 5 files changed, 22 insertions(+), 119 deletions(-) (limited to 'include') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 6e41471449c0..de1bff659969 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -31,6 +31,10 @@ config ZONE_DMA def_bool y depends on !IA64_SGI_SN2 +config QUICKLIST + bool + default y + config MMU bool default y diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 44ce5ed9444c..7ac8592a35b6 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -88,7 +88,7 @@ void show_mem(void) printk(KERN_INFO "%d pages shared\n", total_shared); printk(KERN_INFO "%d pages swap cached\n", total_cached); printk(KERN_INFO "Total of %ld pages in page table cache\n", - pgtable_quicklist_total_size()); + quicklist_total_size()); printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages()); } diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 94844442812a..38085ac18338 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -561,7 +561,7 @@ void show_mem(void) printk(KERN_INFO "%d pages shared\n", total_shared); printk(KERN_INFO "%d pages swap cached\n", total_cached); printk(KERN_INFO "Total of %ld pages in page table cache\n", - pgtable_quicklist_total_size()); + quicklist_total_size()); printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages()); } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index cffb1e8325e8..c14abefabafa 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -39,9 +39,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist); -DEFINE_PER_CPU(long, __pgtable_quicklist_size); - extern void ia64_tlb_init (void); unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; @@ -56,54 +53,6 @@ EXPORT_SYMBOL(vmem_map); struct page *zero_page_memmap_ptr; /* map entry for zero page */ EXPORT_SYMBOL(zero_page_memmap_ptr); -#define MIN_PGT_PAGES 25UL -#define MAX_PGT_FREES_PER_PASS 16L -#define PGT_FRACTION_OF_NODE_MEM 16 - -static inline long -max_pgt_pages(void) -{ - u64 node_free_pages, max_pgt_pages; - -#ifndef CONFIG_NUMA - node_free_pages = nr_free_pages(); -#else - node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES); -#endif - max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM; - max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES); - return max_pgt_pages; -} - -static inline long -min_pages_to_free(void) -{ - long pages_to_free; - - pages_to_free = pgtable_quicklist_size - max_pgt_pages(); - pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS); - return pages_to_free; -} - -void -check_pgt_cache(void) -{ - long pages_to_free; - - if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES)) - return; - - preempt_disable(); - while (unlikely((pages_to_free = min_pages_to_free()) > 0)) { - while (pages_to_free--) { - free_page((unsigned long)pgtable_quicklist_alloc()); - } - preempt_enable(); - preempt_disable(); - } - preempt_enable(); -} - void lazy_mmu_prot_update (pte_t pte) { diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h index 560c287b1233..67552cad5173 100644 --- a/include/asm-ia64/pgalloc.h +++ b/include/asm-ia64/pgalloc.h @@ -18,71 +18,18 @@ #include #include #include 
+#include #include -DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist); -#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist) -DECLARE_PER_CPU(long, __pgtable_quicklist_size); -#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size) - -static inline long pgtable_quicklist_total_size(void) -{ - long ql_size = 0; - int cpuid; - - for_each_online_cpu(cpuid) { - ql_size += per_cpu(__pgtable_quicklist_size, cpuid); - } - return ql_size; -} - -static inline void *pgtable_quicklist_alloc(void) -{ - unsigned long *ret = NULL; - - preempt_disable(); - - ret = pgtable_quicklist; - if (likely(ret != NULL)) { - pgtable_quicklist = (unsigned long *)(*ret); - ret[0] = 0; - --pgtable_quicklist_size; - preempt_enable(); - } else { - preempt_enable(); - ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - } - - return ret; -} - -static inline void pgtable_quicklist_free(void *pgtable_entry) -{ -#ifdef CONFIG_NUMA - int nid = page_to_nid(virt_to_page(pgtable_entry)); - - if (unlikely(nid != numa_node_id())) { - free_page((unsigned long)pgtable_entry); - return; - } -#endif - - preempt_disable(); - *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist; - pgtable_quicklist = (unsigned long *)pgtable_entry; - ++pgtable_quicklist_size; - preempt_enable(); -} - static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - return pgtable_quicklist_alloc(); + return quicklist_alloc(0, GFP_KERNEL, NULL); } static inline void pgd_free(pgd_t * pgd) { - pgtable_quicklist_free(pgd); + quicklist_free(0, NULL, pgd); } #ifdef CONFIG_PGTABLE_4 @@ -94,12 +41,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { - return pgtable_quicklist_alloc(); + return quicklist_alloc(0, GFP_KERNEL, NULL); } static inline void pud_free(pud_t * pud) { - pgtable_quicklist_free(pud); + quicklist_free(0, NULL, pud); } #define __pud_free_tlb(tlb, pud) pud_free(pud) #endif /* CONFIG_PGTABLE_4 */ @@ -112,12 +59,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - return pgtable_quicklist_alloc(); + return quicklist_alloc(0, GFP_KERNEL, NULL); } static inline void pmd_free(pmd_t * pmd) { - pgtable_quicklist_free(pmd); + quicklist_free(0, NULL, pmd); } #define __pmd_free_tlb(tlb, pmd) pmd_free(pmd) @@ -137,28 +84,31 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte) static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr) { - void *pg = pgtable_quicklist_alloc(); + void *pg = quicklist_alloc(0, GFP_KERNEL, NULL); return pg ? virt_to_page(pg) : NULL; } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) { - return pgtable_quicklist_alloc(); + return quicklist_alloc(0, GFP_KERNEL, NULL); } static inline void pte_free(struct page *pte) { - pgtable_quicklist_free(page_address(pte)); + quicklist_free_page(0, NULL, pte); } static inline void pte_free_kernel(pte_t * pte) { - pgtable_quicklist_free(pte); + quicklist_free(0, NULL, pte); } -#define __pte_free_tlb(tlb, pte) pte_free(pte) +static inline void check_pgt_cache(void) +{ + quicklist_trim(0, NULL, 25, 16); +} -extern void check_pgt_cache(void); +#define __pte_free_tlb(tlb, pte) pte_free(pte) #endif /* _ASM_IA64_PGALLOC_H */ -- cgit v1.2.3
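
A short illustrative note on the conversion in this last patch: the generic
quicklist calls it switches to take a quicklist index, GFP flags, and optional
constructor/destructor hooks. The sketch below simply mirrors the converted
helpers to spell out what each argument means; the example_* function names
are hypothetical, and the parameter roles reflect my reading of the
2.6.22-era <linux/quicklist.h> interface rather than anything stated in the
patch itself.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/quicklist.h>

/* Sketch only: allocate a page-table page from quicklist 0 (the single
 * list the patch uses for every IA64 page-table level). */
static inline pgd_t *example_pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, NULL);	/* NULL: no constructor */
}

/* Return the page to quicklist 0 instead of freeing it outright. */
static inline void example_pgd_free(pgd_t *pgd)
{
	quicklist_free(0, NULL, pgd);			/* NULL: no destructor */
}

/* Periodic trimming, as in the new check_pgt_cache(): keep at least 25
 * cached pages and free at most 16 per pass -- the same thresholds the
 * removed MIN_PGT_PAGES / MAX_PGT_FREES_PER_PASS code in init.c used. */
static inline void example_check_pgt_cache(void)
{
	quicklist_trim(0, NULL, 25, 16);
}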
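
And a usage note for the SN smp_affinity patch earlier in the series: with
that change applied, writing a multi-CPU mask to /proc/irq/<N>/smp_affinity
on an sn2 platform fails with EINVAL, while a single-CPU mask is still
accepted. The small userspace sketch below demonstrates the expected
behaviour; the IRQ number and mask values are made up for illustration.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical IRQ; pick a real one from /proc/interrupts. */
	const char *path = "/proc/irq/59/smp_affinity";
	const char *multi = "3\n";	/* CPUs 0 and 1: two bits set */
	const char *single = "2\n";	/* CPU 1 only: one bit set */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* On an sn2 system with the patch, this write fails with EINVAL. */
	if (write(fd, multi, strlen(multi)) < 0)
		printf("multi-bit mask rejected: %s\n", strerror(errno));
	/* A mask with exactly one bit set is still accepted. */
	if (write(fd, single, strlen(single)) >= 0)
		printf("single-bit mask accepted\n");
	close(fd);
	return 0;
}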