50 files changed, 343 insertions, 157 deletions
diff --git a/arch/i386/boot/pm.c b/arch/i386/boot/pm.c index 6be9ca811d17..09fb342cc62e 100644 --- a/arch/i386/boot/pm.c +++ b/arch/i386/boot/pm.c @@ -122,7 +122,11 @@ static void setup_gdt(void) /* DS: data, read/write, 4 GB, base 0 */ [GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff), }; - struct gdt_ptr gdt; + /* Xen HVM incorrectly stores a pointer to the gdt_ptr, instead + of the gdt_ptr contents. Thus, make it static so it will + stay in memory, at least long enough that we switch to the + proper kernel GDT. */ + static struct gdt_ptr gdt; gdt.len = sizeof(boot_gdt)-1; gdt.ptr = (u32)&boot_gdt + (ds() << 4); diff --git a/arch/ia64/hp/sim/hpsim_console.c b/arch/ia64/hp/sim/hpsim_console.c index 6e149c8ab835..01663bc42b1a 100644 --- a/arch/ia64/hp/sim/hpsim_console.c +++ b/arch/ia64/hp/sim/hpsim_console.c @@ -21,6 +21,7 @@ #include <asm/machvec.h> #include <asm/pgtable.h> #include <asm/sal.h> +#include <asm/hpsim.h> #include "hpsim_ssc.h" @@ -28,7 +29,7 @@ static int simcons_init (struct console *, char *); static void simcons_write (struct console *, const char *, unsigned); static struct tty_driver *simcons_console_device (struct console *, int *); -struct console hpsim_cons = { +static struct console hpsim_cons = { .name = "simcons", .write = simcons_write, .device = simcons_console_device, @@ -58,7 +59,18 @@ simcons_write (struct console *cons, const char *buf, unsigned count) static struct tty_driver *simcons_console_device (struct console *c, int *index) { - extern struct tty_driver *hp_simserial_driver; *index = c->index; return hp_simserial_driver; } + +int simcons_register(void) +{ + if (!ia64_platform_is("hpsim")) + return 1; + + if (hpsim_cons.flags & CON_ENABLED) + return 1; + + register_console(&hpsim_cons); + return 0; +} diff --git a/arch/ia64/hp/sim/hpsim_setup.c b/arch/ia64/hp/sim/hpsim_setup.c index f2297192a582..f629e903ebc7 100644 --- a/arch/ia64/hp/sim/hpsim_setup.c +++ b/arch/ia64/hp/sim/hpsim_setup.c @@ -21,6 +21,7 @@ #include <asm/machvec.h> #include <asm/pgtable.h> #include <asm/sal.h> +#include <asm/hpsim.h> #include "hpsim_ssc.h" @@ -41,11 +42,5 @@ hpsim_setup (char **cmdline_p) { ROOT_DEV = Root_SDA1; /* default to first SCSI drive */ -#ifdef CONFIG_HP_SIMSERIAL_CONSOLE - { - extern struct console hpsim_cons; - if (ia64_platform_is("hpsim")) - register_console(&hpsim_cons); - } -#endif + simcons_register(); } diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index f26077a773d5..4017696ada63 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -22,6 +22,9 @@ #include <linux/bitops.h> #include <asm/system.h> #include <asm/irq.h> +#include <asm/hpsim.h> + +#include "hpsim_ssc.h" #define SIMETH_RECV_MAX 10 @@ -35,12 +38,6 @@ #define SIMETH_FRAME_SIZE ETH_FRAME_LEN -#define SSC_NETDEV_PROBE 100 -#define SSC_NETDEV_SEND 101 -#define SSC_NETDEV_RECV 102 -#define SSC_NETDEV_ATTACH 103 -#define SSC_NETDEV_DETACH 104 - #define NETWORK_INTR 8 struct simeth_local { @@ -124,9 +121,6 @@ simeth_probe (void) return r; } -extern long ia64_ssc (long, long, long, long, int); -extern void ia64_ssc_connect_irq (long intr, long irq); - static inline int netdev_probe(char *name, unsigned char *ether) { diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index e62694f8ef75..4552a1cf5b33 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/timer.h> #include <asm/irq.h> +#include "hpsim_ssc.h" #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ 
-59,8 +60,6 @@ struct disk_stat { unsigned count; }; -extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr); - static int desc[16] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index c47c8acc96e3..00a4599e5f47 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -82,7 +82,7 @@ struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = { }; DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = { - [0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR + [0 ... IA64_NUM_VECTORS - 1] = -1 }; static cpumask_t vector_table[IA64_NUM_VECTORS] = { @@ -179,7 +179,7 @@ static void __clear_irq_vector(int irq) domain = cfg->domain; cpus_and(mask, cfg->domain, cpu_online_map); for_each_cpu_mask(cpu, mask) - per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR; + per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; irq_status[irq] = IRQ_UNUSED; @@ -249,7 +249,7 @@ void __setup_vector_irq(int cpu) /* Clear vector_irq */ for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) - per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR; + per_cpu(vector_irq, cpu)[vector] = -1; /* Mark the inuse vectors */ for (irq = 0; irq < NR_IRQS; ++irq) { if (!cpu_isset(cpu, irq_cfg[irq].domain)) @@ -432,10 +432,18 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) } else if (unlikely(IS_RESCHEDULE(vector))) kstat_this_cpu.irqs[vector]++; else { + int irq = local_vector_to_irq(vector); + ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); - generic_handle_irq(local_vector_to_irq(vector)); + if (unlikely(irq < 0)) { + printk(KERN_ERR "%s: Unexpected interrupt " + "vector %d on CPU %d is not mapped " + "to any IRQ!\n", __FUNCTION__, vector, + smp_processor_id()); + } else + generic_handle_irq(irq); /* * Disable interrupts and send EOI: @@ -483,6 +491,7 @@ void ia64_process_pending_intr(void) kstat_this_cpu.irqs[vector]++; else { struct pt_regs *old_regs = set_irq_regs(NULL); + int irq = local_vector_to_irq(vector); ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); @@ -493,8 +502,15 @@ void ia64_process_pending_intr(void) * it will work. I hope it works!. * Probably could shared code. */ - vectors_in_migration[local_vector_to_irq(vector)]=0; - generic_handle_irq(local_vector_to_irq(vector)); + if (unlikely(irq < 0)) { + printk(KERN_ERR "%s: Unexpected interrupt " + "vector %d on CPU %d not being mapped " + "to any IRQ!!\n", __FUNCTION__, vector, + smp_processor_id()); + } else { + vectors_in_migration[irq]=0; + generic_handle_irq(irq); + } set_irq_regs(old_regs); /* diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 407efea04bf5..9e392a30d197 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -60,6 +60,7 @@ #include <asm/smp.h> #include <asm/system.h> #include <asm/unistd.h> +#include <asm/hpsim.h> #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) # error "struct cpuinfo_ia64 too big!" @@ -389,13 +390,8 @@ early_console_setup (char *cmdline) if (!efi_setup_pcdp_console(cmdline)) earlycons++; #endif -#ifdef CONFIG_HP_SIMSERIAL_CONSOLE - { - extern struct console hpsim_cons; - register_console(&hpsim_cons); + if (!simcons_register()) earlycons++; - } -#endif return (earlycons) ? 
0 : -1; } @@ -960,6 +956,11 @@ cpu_init (void) /* clear TPR & XTP to enable all interrupt classes: */ ia64_setreg(_IA64_REG_CR_TPR, 0); + + /* Clear any pending interrupts left by SAL/EFI */ + while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) + ia64_eoi(); + #ifdef CONFIG_SMP normal_xtp(); #endif diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 62209dcf06d3..308772f7cddc 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -58,6 +58,7 @@ #include <asm/system.h> #include <asm/tlbflush.h> #include <asm/unistd.h> +#include <asm/sn/arch.h> #define SMP_DEBUG 0 @@ -730,6 +731,11 @@ int __cpu_disable(void) return (-EBUSY); } + if (ia64_platform_is("sn2")) { + if (!sn_cpu_disable_allowed(cpu)) + return -EBUSY; + } + cpu_clear(cpu, cpu_online_map); if (migrate_platform_irqs(cpu)) { diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 7ac8592a35b6..d3c538be466c 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -17,6 +17,7 @@ #include <linux/bootmem.h> #include <linux/efi.h> #include <linux/mm.h> +#include <linux/nmi.h> #include <linux/swap.h> #include <asm/meminit.h> @@ -56,6 +57,8 @@ void show_mem(void) present = pgdat->node_present_pages; for(i = 0; i < pgdat->node_spanned_pages; i++) { struct page *page; + if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) + touch_nmi_watchdog(); if (pfn_valid(pgdat->node_start_pfn + i)) page = pfn_to_page(pgdat->node_start_pfn + i); else { diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 0dbf0e81f8c0..0d34585058c8 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/nmi.h> #include <linux/swap.h> #include <linux/bootmem.h> #include <linux/acpi.h> @@ -533,6 +534,8 @@ void show_mem(void) present = pgdat->node_present_pages; for(i = 0; i < pgdat->node_spanned_pages; i++) { struct page *page; + if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) + touch_nmi_watchdog(); if (pfn_valid(pgdat->node_start_pfn + i)) page = pfn_to_page(pgdat->node_start_pfn + i); else { diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c index 2c3f9dfca78b..b663168da55c 100644 --- a/arch/ia64/sn/kernel/huberror.c +++ b/arch/ia64/sn/kernel/huberror.c @@ -185,11 +185,14 @@ void hubiio_crb_error_handler(struct hubdev_info *hubdev_info) */ void hub_error_init(struct hubdev_info *hubdev_info) { + if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED, - "SN_hub_error", (void *)hubdev_info)) + "SN_hub_error", (void *)hubdev_info)) { printk("hub_error_init: Failed to request_irq for 0x%p\n", hubdev_info); - return; + return; + } + sn_set_err_irq_affinity(SGI_II_ERROR); } @@ -202,11 +205,14 @@ void hub_error_init(struct hubdev_info *hubdev_info) */ void ice_error_init(struct hubdev_info *hubdev_info) { + if (request_irq (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error", - (void *)hubdev_info)) + (void *)hubdev_info)) { printk("ice_error_init: request_irq() error hubdev_info 0x%p\n", hubdev_info); - return; + return; + } + sn_set_err_irq_affinity(SGI_TIO_ERROR); } diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 360047389449..0f9b12683bf3 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -19,6 +19,7 @@ #include <asm/sn/pcidev.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/sn_sal.h> +#include <asm/sn/sn_feature_sets.h> static void force_interrupt(int irq); static void register_intr_pda(struct sn_irq_info *sn_irq_info); @@ 
-233,6 +234,20 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) (void)sn_retarget_vector(sn_irq_info, nasid, slice); } +#ifdef CONFIG_SMP +void sn_set_err_irq_affinity(unsigned int irq) +{ + /* + * On systems which support CPU disabling (SHub2), all error interrupts + * are targetted at the boot CPU. + */ + if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) + set_irq_affinity_info(irq, cpu_physical_id(0), 0); +} +#else +void sn_set_err_irq_affinity(unsigned int irq) { } +#endif + static void sn_mask_irq(unsigned int irq) { diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index 033c8a9f000e..f3c69329e145 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -40,6 +40,7 @@ #include <asm/sn/shub_mmr.h> #include <asm/sn/nodepda.h> #include <asm/sn/rw_mmr.h> +#include <asm/sn/sn_feature_sets.h> DEFINE_PER_CPU(struct ptc_stats, ptcstats); DECLARE_PER_CPU(struct ptc_stats, ptcstats); @@ -429,6 +430,31 @@ void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect) sn_send_IPI_phys(nasid, physid, vector, delivery_mode); } +#ifdef CONFIG_HOTPLUG_CPU +/** + * sn_cpu_disable_allowed - Determine if a CPU can be disabled. + * @cpu - CPU that is requested to be disabled. + * + * CPU disable is only allowed on SHub2 systems running with a PROM + * that supports CPU disable. It is not permitted to disable the boot processor. + */ +bool sn_cpu_disable_allowed(int cpu) +{ + if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) { + if (cpu != 0) + return true; + else + printk(KERN_WARNING + "Disabling the boot processor is not allowed.\n"); + + } else + printk(KERN_WARNING + "CPU disable is not supported on this system.\n"); + + return false; +} +#endif /* CONFIG_HOTPLUG_CPU */ + #ifdef CONFIG_PROC_FS #define PTC_BASENAME "sgi_sn/ptc_statistics" diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index df8d5bed6119..1a8e49607f11 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -66,7 +66,8 @@ static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret) } sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info); - if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) { + objbuf = vmalloc(sz); + if (objbuf == NULL) { printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz); e = -ENOMEM; goto out; diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 42485ad50ceb..ab3eaf85fe4d 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -145,6 +145,7 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont printk(KERN_WARNING "pcibr cannot allocate interrupt for error handler\n"); } + sn_set_err_irq_affinity(SGI_PCIASIC_ERROR); /* * Update the Bridge with the "kernel" pagesize diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index d798dd4d0dc4..ef048a674772 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -654,6 +654,8 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont __FUNCTION__, SGI_TIOCA_ERROR, (int)tioca_common->ca_common.bs_persist_busnum); + sn_set_err_irq_affinity(SGI_TIOCA_ERROR); + /* Setup locality information */ controller->node = tioca_kern->ca_closest_node; return tioca_common; diff --git 
a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 84b72b27e27f..cee9379d44e0 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -1034,6 +1034,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont tioce_common->ce_pcibus.bs_persist_segment, tioce_common->ce_pcibus.bs_persist_busnum); + sn_set_err_irq_affinity(SGI_PCIASIC_ERROR); return tioce_common; } diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 9a8c9af43b22..d6a38cd5018e 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -188,33 +188,21 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node( struct device_node *dn) { struct ibmebus_dev *dev; - const char *loc_code; - int length; - - loc_code = of_get_property(dn, "ibm,loc-code", NULL); - if (!loc_code) { - printk(KERN_WARNING "%s: node %s missing 'ibm,loc-code'\n", - __FUNCTION__, dn->name ? dn->name : "<unknown>"); - return ERR_PTR(-EINVAL); - } - - if (strlen(loc_code) == 0) { - printk(KERN_WARNING "%s: 'ibm,loc-code' is invalid\n", - __FUNCTION__); - return ERR_PTR(-EINVAL); - } + int i, len, bus_len; dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL); - if (!dev) { + if (!dev) return ERR_PTR(-ENOMEM); - } dev->ofdev.node = of_node_get(dn); - length = strlen(loc_code); - memcpy(dev->ofdev.dev.bus_id, loc_code - + (length - min(length, BUS_ID_SIZE - 1)), - min(length, BUS_ID_SIZE - 1)); + len = strlen(dn->full_name + 1); + bus_len = min(len, BUS_ID_SIZE - 1); + memcpy(dev->ofdev.dev.bus_id, dn->full_name + 1 + + (len - bus_len), bus_len); + for (i = 0; i < bus_len; i++) + if (dev->ofdev.dev.bus_id[i] == '/') + dev->ofdev.dev.bus_id[i] = '_'; /* Register with generic device framework. */ if (ibmebus_register_device_common(dev, dn->name) != 0) { diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 095a30304c56..106d2921e2d9 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -236,27 +236,34 @@ static irqreturn_t spu_irq_class_0(int irq, void *data) { struct spu *spu; + unsigned long stat, mask; spu = data; - spu->class_0_pending = 1; + + mask = spu_int_mask_get(spu, 0); + stat = spu_int_stat_get(spu, 0); + stat &= mask; + + spin_lock(&spu->register_lock); + spu->class_0_pending |= stat; + spin_unlock(&spu->register_lock); + spu->stop_callback(spu); + spu_int_stat_clear(spu, 0, stat); + return IRQ_HANDLED; } int spu_irq_class_0_bottom(struct spu *spu) { - unsigned long stat, mask; unsigned long flags; - - spu->class_0_pending = 0; + unsigned long stat; spin_lock_irqsave(&spu->register_lock, flags); - mask = spu_int_mask_get(spu, 0); - stat = spu_int_stat_get(spu, 0); - - stat &= mask; + stat = spu->class_0_pending; + spu->class_0_pending = 0; if (stat & 1) /* invalid DMA alignment */ __spu_trap_dma_align(spu); @@ -267,7 +274,6 @@ spu_irq_class_0_bottom(struct spu *spu) if (stat & 4) /* error on SPU */ __spu_trap_error(spu); - spu_int_stat_clear(spu, 0, stat); spin_unlock_irqrestore(&spu->register_lock, flags); return (stat & 0x7) ? 
-EIO : 0; diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h index 87d52060fec0..2eb8f92704b4 100644 --- a/arch/powerpc/platforms/ps3/platform.h +++ b/arch/powerpc/platforms/ps3/platform.h @@ -83,6 +83,7 @@ enum ps3_dev_type { PS3_DEV_TYPE_STOR_ROM = TYPE_ROM, /* 5 */ PS3_DEV_TYPE_SB_GPIO = 6, PS3_DEV_TYPE_STOR_FLASH = TYPE_RBC, /* 14 */ + PS3_DEV_TYPE_STOR_DUMMY = 32, PS3_DEV_TYPE_NOACCESS = 255, }; diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 8cc37cfea0f2..1c94824f7b63 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c @@ -349,6 +349,35 @@ int ps3_repository_find_device(struct ps3_repository_device *repo) return result; } + if (tmp.bus_type == PS3_BUS_TYPE_STORAGE) { + /* + * A storage device may show up in the repository before the + * hypervisor has finished probing its type and regions + */ + unsigned int num_regions; + + if (tmp.dev_type == PS3_DEV_TYPE_STOR_DUMMY) { + pr_debug("%s:%u storage device not ready\n", __func__, + __LINE__); + return -ENODEV; + } + + result = ps3_repository_read_stor_dev_num_regions(tmp.bus_index, + tmp.dev_index, + &num_regions); + if (result) { + pr_debug("%s:%d read_stor_dev_num_regions failed\n", + __func__, __LINE__); + return result; + } + + if (!num_regions) { + pr_debug("%s:%u storage device has no regions yet\n", + __func__, __LINE__); + return -ENODEV; + } + } + result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index, &tmp.dev_id); diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index ac2a4b8a4c14..d1630a074acf 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -505,6 +505,8 @@ static void mfc_sr1_set(struct spu *spu, u64 sr1) static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | MFC_STATE1_PROBLEM_STATE_MASK); + sr1 |= MFC_STATE1_MASTER_RUN_CONTROL_MASK; + BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed)); spu_pdata(spu)->cache.sr1 = sr1; diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index e40c94f5f59d..3b8bf1812dc8 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -955,7 +955,8 @@ static int piix_broken_suspend(void) DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"), }, }, - { } + + { } /* terminate list */ }; static const char *oemstrs[] = { "Tecra M3,", @@ -1187,6 +1188,8 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) DMI_MATCH(DMI_PRODUCT_NAME, "M570U"), }, }, + + { } /* terminate list */ }; u32 iocfg; diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index a4e631766eee..57fd30de8f0d 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -371,7 +371,7 @@ static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 }; - pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->pio_mode - XFER_UDMA_0]); + pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]); } static const unsigned int svia_bar_sizes[] = { diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index aca331971201..9b430f20b640 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -70,6 +70,8 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags, DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n", mtd->index, mtd->name); + sb->s_flags = 
flags; + ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); if (ret < 0) { up_write(&sb->s_umount); diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 1d3b7a9fc828..8bc727b71696 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -627,7 +627,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, struct inode *inode = OFNI_EDONI_2SFFJ(f); struct page *pg; - pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, + pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, (void *)jffs2_do_readpage_unlock, inode); if (IS_ERR(pg)) return (void *)pg; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index c87dc713b5d7..579cf8a7d4a7 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -316,7 +316,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) if (offset != 0) return; /* Cancel any unstarted writes on this page */ - nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE); + nfs_wb_page_cancel(page->mapping->host, page); } static int nfs_release_page(struct page *page, gfp_t gfp) diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index aea76d0e5fbd..acfc56f9edc0 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -176,7 +176,7 @@ static void nfs_expire_automounts(struct work_struct *work) void nfs_release_automount_timer(void) { if (list_empty(&nfs_automount_list)) - cancel_delayed_work_sync(&nfs_automount_task); + cancel_delayed_work(&nfs_automount_task); } /* diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 62b3ae280310..4b90e17555a9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -646,7 +646,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state rcu_read_lock(); delegation = rcu_dereference(NFS_I(state->inode)->delegation); if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0) - delegation_type = delegation->flags; + delegation_type = delegation->type; rcu_read_unlock(); opendata->o_arg.u.delegation_type = delegation_type; status = nfs4_open_recover(opendata, state); @@ -1434,7 +1434,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) } res = d_add_unique(dentry, igrab(state->inode)); if (res != NULL) - dentry = res; + path.dentry = res; nfs4_intent_set_file(nd, &path, state); return res; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 46139003ea0c..8ed593766f16 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -911,13 +911,13 @@ static int nfs_parse_mount_options(char *raw, kfree(string); switch (token) { - case Opt_udp: + case Opt_xprt_udp: mnt->flags &= ~NFS_MOUNT_TCP; mnt->nfs_server.protocol = IPPROTO_UDP; mnt->timeo = 7; mnt->retrans = 5; break; - case Opt_tcp: + case Opt_xprt_tcp: mnt->flags |= NFS_MOUNT_TCP; mnt->nfs_server.protocol = IPPROTO_TCP; mnt->timeo = 600; @@ -936,10 +936,10 @@ static int nfs_parse_mount_options(char *raw, kfree(string); switch (token) { - case Opt_udp: + case Opt_xprt_udp: mnt->mount_server.protocol = IPPROTO_UDP; break; - case Opt_tcp: + case Opt_xprt_tcp: mnt->mount_server.protocol = IPPROTO_TCP; break; default: @@ -1153,20 +1153,20 @@ static int nfs_validate_mount_data(struct nfs_mount_data **options, c = strchr(dev_name, ':'); if (c == NULL) return -EINVAL; - len = c - dev_name - 1; + len = c - dev_name; if (len > sizeof(data->hostname)) - return -EINVAL; + return -ENAMETOOLONG; strncpy(data->hostname, dev_name, len); args.nfs_server.hostname = data->hostname; c++; if (strlen(c) > NFS_MAXPATHLEN) - return -EINVAL; + return -ENAMETOOLONG; args.nfs_server.export_path 
= c; status = nfs_try_mount(&args, mntfh); if (status) - return -EINVAL; + return status; /* * Translate to nfs_mount_data, which nfs_fill_super @@ -1677,7 +1677,7 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options, /* while calculating len, pretend ':' is '\0' */ len = c - dev_name; if (len > NFS4_MAXNAMLEN) - return -EINVAL; + return -ENAMETOOLONG; *hostname = kzalloc(len, GFP_KERNEL); if (*hostname == NULL) return -ENOMEM; @@ -1686,7 +1686,7 @@ static int nfs4_validate_mount_data(struct nfs4_mount_data **options, c++; /* step over the ':' */ len = strlen(c); if (len > NFS4_MAXPATHLEN) - return -EINVAL; + return -ENAMETOOLONG; *mntpath = kzalloc(len + 1, GFP_KERNEL); if (*mntpath == NULL) return -ENOMEM; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ef97e0c0f5b1..0d7a77cc394b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1396,6 +1396,50 @@ out: return ret; } +int nfs_wb_page_cancel(struct inode *inode, struct page *page) +{ + struct nfs_page *req; + loff_t range_start = page_offset(page); + loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + struct writeback_control wbc = { + .bdi = page->mapping->backing_dev_info, + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = range_start, + .range_end = range_end, + }; + int ret = 0; + + BUG_ON(!PageLocked(page)); + for (;;) { + req = nfs_page_find_request(page); + if (req == NULL) + goto out; + if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { + nfs_release_request(req); + break; + } + if (nfs_lock_request_dontget(req)) { + nfs_inode_remove_request(req); + /* + * In case nfs_inode_remove_request has marked the + * page as being dirty + */ + cancel_dirty_page(page, PAGE_CACHE_SIZE); + nfs_unlock_request(req); + break; + } + ret = nfs_wait_on_request(req); + if (ret < 0) + goto out; + } + if (!PagePrivate(page)) + return 0; + ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE); +out: + return ret; +} + int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) { loff_t range_start = page_offset(page); diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index b4acc7f3c374..e6ea293f303c 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); static inline int kmem_shake_allow(gfp_t gfp_mask) { - return (gfp_mask & __GFP_WAIT); + return (gfp_mask & __GFP_WAIT) != 0; } #endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index fd4105d662e0..d9c40fe64195 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -652,7 +652,7 @@ xfs_probe_cluster( for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; - size_t pg_offset, len = 0; + size_t pg_offset, pg_len = 0; if (tindex == tlast) { pg_offset = @@ -665,16 +665,16 @@ xfs_probe_cluster( pg_offset = PAGE_CACHE_SIZE; if (page->index == tindex && !TestSetPageLocked(page)) { - len = xfs_probe_page(page, pg_offset, mapped); + pg_len = xfs_probe_page(page, pg_offset, mapped); unlock_page(page); } - if (!len) { + if (!pg_len) { done = 1; break; } - total += len; + total += pg_len; tindex++; } diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c index bb72c3d4141f..81565dea9af7 100644 --- a/fs/xfs/linux-2.6/xfs_globals.c +++ b/fs/xfs/linux-2.6/xfs_globals.c @@ -46,7 +46,7 @@ xfs_param_t xfs_params = { .inherit_nosym = { 0, 0, 1 }, .rotorstep = { 1, 1, 255 }, .inherit_nodfrg = { 0, 1, 1 }, - 
.fstrm_timer = { 1, 50, 3600*100}, + .fstrm_timer = { 1, 30*100, 3600*100}, }; /* diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 2d274b23ade5..6ff0f4de1630 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -120,7 +120,8 @@ xfs_Gqm_init(void) * Initialize the dquot hash tables. */ udqhash = kmem_zalloc_greedy(&hsize, - XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH, + XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), + XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t), KM_SLEEP | KM_MAYFAIL | KM_LARGE); gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE); hsize /= sizeof(xfs_dqhash_t); diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h index a27a7c8c0526..855da0408647 100644 --- a/fs/xfs/support/debug.h +++ b/fs/xfs/support/debug.h @@ -34,10 +34,10 @@ extern void cmn_err(int, char *, ...) extern void assfail(char *expr, char *f, int l); #define ASSERT_ALWAYS(expr) \ - (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) + (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) #ifndef DEBUG -# define ASSERT(expr) ((void)0) +#define ASSERT(expr) ((void)0) #ifndef STATIC # define STATIC static noinline @@ -49,8 +49,10 @@ extern void assfail(char *expr, char *f, int l); #else /* DEBUG */ -# define ASSERT(expr) ASSERT_ALWAYS(expr) -# include <linux/random.h> +#include <linux/random.h> + +#define ASSERT(expr) \ + (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) #ifndef STATIC # define STATIC noinline diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index aea37df4aa62..26d09e2e1a7f 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -1975,7 +1975,6 @@ xfs_da_do_buf( error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED); if (unlikely(error == EFSCORRUPTED)) { if (xfs_error_level >= XFS_ERRLEVEL_LOW) { - int i; cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n", (long long)bno); cmn_err(CE_ALERT, "dir: inode %lld\n", diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 9d4c4fbeb3ee..9bfb69e1e885 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -2185,13 +2185,13 @@ xlog_state_do_callback( } cb = iclog->ic_callback; - while (cb != 0) { + while (cb) { iclog->ic_callback_tail = &(iclog->ic_callback); iclog->ic_callback = NULL; LOG_UNLOCK(log, s); /* perform callbacks in the order given */ - for (; cb != 0; cb = cb_next) { + for (; cb; cb = cb_next) { cb_next = cb->cb_next; cb->cb_func(cb->cb_arg, aborted); } @@ -2202,7 +2202,7 @@ xlog_state_do_callback( loopdidcallbacks++; funcdidcallbacks++; - ASSERT(iclog->ic_callback == 0); + ASSERT(iclog->ic_callback == NULL); if (!(iclog->ic_state & XLOG_STATE_IOERROR)) iclog->ic_state = XLOG_STATE_DIRTY; @@ -3242,10 +3242,10 @@ xlog_ticket_put(xlog_t *log, #else /* When we debug, it is easier if tickets are cycled */ ticket->t_next = NULL; - if (log->l_tail != 0) { + if (log->l_tail) { log->l_tail->t_next = ticket; } else { - ASSERT(log->l_freelist == 0); + ASSERT(log->l_freelist == NULL); log->l_freelist = ticket; } log->l_tail = ticket; @@ -3463,7 +3463,7 @@ xlog_verify_iclog(xlog_t *log, s = LOG_LOCK(log); icptr = log->l_iclog; for (i=0; i < log->l_iclog_bufs; i++) { - if (icptr == 0) + if (icptr == NULL) xlog_panic("xlog_verify_iclog: invalid ptr"); icptr = icptr->ic_next; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index fddbb091a86f..8ae6e8e5f3db 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1366,7 +1366,7 @@ xlog_recover_add_to_cont_trans( int old_len; item = trans->r_itemq; - if (item == 0) { + if (item == NULL) { 
/* finish copying rest of trans header */ xlog_recover_add_item(&trans->r_itemq); ptr = (xfs_caddr_t) &trans->r_theader + @@ -1412,7 +1412,7 @@ xlog_recover_add_to_trans( if (!len) return 0; item = trans->r_itemq; - if (item == 0) { + if (item == NULL) { ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC); if (len == sizeof(xfs_trans_header_t)) xlog_recover_add_item(&trans->r_itemq); @@ -1467,12 +1467,12 @@ xlog_recover_unlink_tid( xlog_recover_t *tp; int found = 0; - ASSERT(trans != 0); + ASSERT(trans != NULL); if (trans == *q) { *q = (*q)->r_next; } else { tp = *q; - while (tp != 0) { + while (tp) { if (tp->r_next == trans) { found = 1; break; @@ -1495,7 +1495,7 @@ xlog_recover_insert_item_backq( xlog_recover_item_t **q, xlog_recover_item_t *item) { - if (*q == 0) { + if (*q == NULL) { item->ri_prev = item->ri_next = item; *q = item; } else { @@ -1899,7 +1899,7 @@ xlog_recover_do_reg_buffer( break; nbits = xfs_contig_bits(data_map, map_size, bit); ASSERT(nbits > 0); - ASSERT(item->ri_buf[i].i_addr != 0); + ASSERT(item->ri_buf[i].i_addr != NULL); ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0); ASSERT(XFS_BUF_COUNT(bp) >= ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT)); diff --git a/include/asm-ia64/hpsim.h b/include/asm-ia64/hpsim.h new file mode 100644 index 000000000000..892ab198a9da --- /dev/null +++ b/include/asm-ia64/hpsim.h @@ -0,0 +1,16 @@ +#ifndef _ASMIA64_HPSIM_H +#define _ASMIA64_HPSIM_H + +#ifndef CONFIG_HP_SIMSERIAL_CONSOLE +static inline int simcons_register(void) { return 1; } +#else +int simcons_register(void); +#endif + +struct tty_driver; +extern struct tty_driver *hp_simserial_driver; + +void ia64_ssc_connect_irq(long intr, long irq); +void ia64_ctl_trace(long on); + +#endif diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h index 16adc93d7a72..7caa1f44cd95 100644 --- a/include/asm-ia64/sn/arch.h +++ b/include/asm-ia64/sn/arch.h @@ -81,5 +81,6 @@ extern u8 sn_sharing_domain_size; extern u8 sn_region_size; extern void sn_flush_all_caches(long addr, long bytes); +extern bool sn_cpu_disable_allowed(int cpu); #endif /* _ASM_IA64_SN_ARCH_H */ diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h index 12b54ddb06be..e0487aa97418 100644 --- a/include/asm-ia64/sn/intr.h +++ b/include/asm-ia64/sn/intr.h @@ -60,6 +60,7 @@ extern u64 sn_intr_alloc(nasid_t, int, int, nasid_t, int); extern void sn_intr_free(nasid_t, int, struct sn_irq_info *); extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int); +extern void sn_set_err_irq_affinity(unsigned int); extern struct list_head **sn_irq_lh; #define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector) diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h index bfdc36273ed4..8e83ac117ace 100644 --- a/include/asm-ia64/sn/sn_feature_sets.h +++ b/include/asm-ia64/sn/sn_feature_sets.h @@ -31,6 +31,7 @@ extern int sn_prom_feature_available(int id); #define PRF_PAL_CACHE_FLUSH_SAFE 0 #define PRF_DEVICE_FLUSH_LIST 1 #define PRF_HOTPLUG_SUPPORT 2 +#define PRF_CPU_DISABLE_SUPPORT 3 /* --------------------- OS Features -------------------------------*/ diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h index 8836c0f1f2f7..5bde3980bf49 100644 --- a/include/asm-powerpc/spu.h +++ b/include/asm-powerpc/spu.h @@ -130,6 +130,7 @@ struct spu { u64 flags; u64 dar; u64 dsisr; + u64 class_0_pending; size_t ls_size; unsigned int slb_replace; struct mm_struct *mm; @@ -138,7 +139,6 @@ struct spu { unsigned long long timestamp; pid_t pid; pid_t tgid; - int 
class_0_pending; spinlock_t register_lock; void (* wbox_callback)(struct spu *spu); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 157dcb055b5c..7250eeadd7b5 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -431,6 +431,7 @@ extern int nfs_sync_mapping_range(struct address_space *, loff_t, loff_t, int); extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page* page); extern int nfs_wb_page_priority(struct inode *inode, struct page* page, int how); +extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) extern int nfs_commit_inode(struct inode *, int); extern struct nfs_write_data *nfs_commit_alloc(void); diff --git a/kernel/sched.c b/kernel/sched.c index b533d6db78aa..deeb1f8e0c30 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -668,7 +668,7 @@ static u64 div64_likely32(u64 divident, unsigned long divisor) /* * Shift right and round: */ -#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) +#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) static unsigned long calc_delta_mine(unsigned long delta_exec, unsigned long weight, @@ -684,10 +684,10 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, * Check whether we'd overflow the 64-bit multiplication: */ if (unlikely(tmp > WMULT_CONST)) - tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight, + tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, WMULT_SHIFT/2); else - tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT); + tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); } @@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq) static void set_load_weight(struct task_struct *p) { - task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime; p->se.wait_runtime = 0; if (task_has_rt_policy(p)) { @@ -2512,7 +2511,7 @@ group_next: * a think about bumping its value to force at least one task to be * moved */ - if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) { + if (*imbalance < busiest_load_per_task) { unsigned long tmp, pwr_now, pwr_move; unsigned int imbn; @@ -2564,10 +2563,8 @@ small_imbalance: pwr_move /= SCHED_LOAD_SCALE; /* Move if we gain throughput */ - if (pwr_move <= pwr_now) - goto out_balanced; - - *imbalance = busiest_load_per_task; + if (pwr_move > pwr_now) + *imbalance = busiest_load_per_task; } return busiest; diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index ab18f45f2ab2..c3ee38bd3426 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -283,4 +283,5 @@ void proc_sched_set_task(struct task_struct *p) p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0; #endif p->se.sum_exec_runtime = 0; + p->se.prev_sum_exec_runtime = 0; } diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index ce39282d9c0d..892616bf2c77 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) update_load_add(&cfs_rq->load, se->load.weight); cfs_rq->nr_running++; se->on_rq = 1; + + schedstat_add(cfs_rq, wait_runtime, se->wait_runtime); } static inline void @@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) update_load_sub(&cfs_rq->load, se->load.weight); cfs_rq->nr_running--; se->on_rq = 0; + + schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime); } static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq) @@ -291,7 +295,7 @@ 
niced_granularity(struct sched_entity *curr, unsigned long granularity) /* * It will always fit into 'long': */ - return (long) (tmp >> WMULT_SHIFT); + return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT)); } static inline void @@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) prev_runtime = se->wait_runtime; __add_wait_runtime(cfs_rq, se, delta_fair); - schedstat_add(cfs_rq, wait_runtime, se->wait_runtime); delta_fair = se->wait_runtime - prev_runtime; /* @@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) if (tsk->state & TASK_UNINTERRUPTIBLE) se->block_start = rq_of(cfs_rq)->clock; } - cfs_rq->wait_runtime -= se->wait_runtime; #endif } __dequeue_entity(cfs_rq, se); @@ -671,22 +673,39 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) /* * Preempt the current task with a newly woken task if needed: */ -static int +static void __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, struct sched_entity *curr, unsigned long granularity) { s64 __delta = curr->fair_key - se->fair_key; + unsigned long ideal_runtime, delta_exec; + + /* + * ideal_runtime is compared against sum_exec_runtime, which is + * walltime, hence do not scale. + */ + ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running, + (unsigned long)sysctl_sched_min_granularity); + + /* + * If we executed more than what the latency constraint suggests, + * reduce the rescheduling granularity. This way the total latency + * of how much a task is not scheduled converges to + * sysctl_sched_latency: + */ + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) + granularity = 0; /* * Take scheduling granularity into account - do not * preempt the current task unless the best task has * a larger than sched_granularity fairness advantage: + * + * scale granularity as key space is in fair_clock. */ - if (__delta > niced_granularity(curr, granularity)) { + if (__delta > niced_granularity(curr, granularity)) resched_task(rq_of(cfs_rq)->curr); - return 1; - } - return 0; } static inline void @@ -702,6 +721,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) update_stats_wait_end(cfs_rq, se); update_stats_curr_start(cfs_rq, se); set_cfs_rq_curr(cfs_rq, se); + se->prev_sum_exec_runtime = se->sum_exec_runtime; } static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) @@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) { - unsigned long gran, ideal_runtime, delta_exec; struct sched_entity *next; /* @@ -748,22 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) if (next == curr) return; - gran = sched_granularity(cfs_rq); - ideal_runtime = niced_granularity(curr, - max(sysctl_sched_latency / cfs_rq->nr_running, - (unsigned long)sysctl_sched_min_granularity)); - /* - * If we executed more than what the latency constraint suggests, - * reduce the rescheduling granularity. 
This way the total latency - * of how much a task is not scheduled converges to - * sysctl_sched_latency: - */ - delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; - if (delta_exec > ideal_runtime) - gran = 0; - - if (__check_preempt_curr_fair(cfs_rq, next, curr, gran)) - curr->prev_sum_exec_runtime = curr->sum_exec_runtime; + __check_preempt_curr_fair(cfs_rq, next, curr, + sched_granularity(cfs_rq)); } /************************************************** @@ -1121,10 +1126,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) * The statistical average of wait_runtime is about * -granularity/2, so initialize the task with that: */ - if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) { + if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) se->wait_runtime = -(sched_granularity(cfs_rq) / 2); - schedstat_add(cfs_rq, wait_runtime, se->wait_runtime); - } __enqueue_entity(cfs_rq, se); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1ee72127462b..bbad2cdb74b7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -560,7 +560,7 @@ static u32 tcp_rto_min(struct sock *sk) struct dst_entry *dst = __sk_dst_get(sk); u32 rto_min = TCP_RTO_MIN; - if (dst_metric_locked(dst, RTAX_RTO_MIN)) + if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) rto_min = dst->metrics[RTAX_RTO_MIN-1]; return rto_min; } diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 8be6a4269e63..a38787a881ea 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -72,7 +72,7 @@ static void check_stdin(void) } } -static void conf_askvalue(struct symbol *sym, const char *def) +static int conf_askvalue(struct symbol *sym, const char *def) { enum symbol_type type = sym_get_type(sym); tristate val; @@ -87,7 +87,7 @@ static void conf_askvalue(struct symbol *sym, const char *def) printf("%s\n", def); line[0] = '\n'; line[1] = 0; - return; + return 0; } switch (input_mode) { @@ -97,23 +97,23 @@ static void conf_askvalue(struct symbol *sym, const char *def) case set_random: if (sym_has_value(sym)) { printf("%s\n", def); - return; + return 0; } break; case ask_new: case ask_silent: if (sym_has_value(sym)) { printf("%s\n", def); - return; + return 0; } check_stdin(); case ask_all: fflush(stdout); fgets(line, 128, stdin); - return; + return 1; case set_default: printf("%s\n", def); - return; + return 1; default: break; } @@ -123,7 +123,7 @@ static void conf_askvalue(struct symbol *sym, const char *def) case S_HEX: case S_STRING: printf("%s\n", def); - return; + return 1; default: ; } @@ -174,6 +174,7 @@ static void conf_askvalue(struct symbol *sym, const char *def) break; } printf("%s", line); + return 1; } int conf_string(struct menu *menu) @@ -187,7 +188,8 @@ int conf_string(struct menu *menu) def = sym_get_string_value(sym); if (sym_get_string_value(sym)) printf("[%s] ", def); - conf_askvalue(sym, def); + if (!conf_askvalue(sym, def)) + return 0; switch (line[0]) { case '\n': break; @@ -240,7 +242,8 @@ static int conf_sym(struct menu *menu) if (menu_has_help(menu)) printf("/?"); printf("] "); - conf_askvalue(sym, sym_get_string_value(sym)); + if (!conf_askvalue(sym, sym_get_string_value(sym))) + return 0; strip(line); switch (line[0]) { |