Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/time.c | 7
-rw-r--r--  arch/arm/kernel/time.c | 7
-rw-r--r--  arch/arm/mach-pxa/Makefile | 2
-rw-r--r--  arch/arm/mach-pxa/corgi.c | 55
-rw-r--r--  arch/arm/mach-pxa/corgi_lcd.c | 396
-rw-r--r--  arch/arm26/Makefile | 4
-rw-r--r--  arch/arm26/kernel/time.c | 7
-rw-r--r--  arch/cris/arch-v10/kernel/time.c | 2
-rw-r--r--  arch/cris/kernel/time.c | 5
-rw-r--r--  arch/frv/kernel/time.c | 7
-rw-r--r--  arch/h8300/kernel/time.c | 5
-rw-r--r--  arch/i386/Kconfig | 5
-rw-r--r--  arch/i386/boot/setup.S | 2
-rw-r--r--  arch/i386/boot/tools/build.c | 4
-rw-r--r--  arch/i386/kernel/dmi_scan.c | 231
-rw-r--r--  arch/i386/kernel/entry.S | 13
-rw-r--r--  arch/i386/kernel/io_apic.c | 65
-rw-r--r--  arch/i386/kernel/kprobes.c | 35
-rw-r--r--  arch/i386/kernel/nmi.c | 5
-rw-r--r--  arch/i386/kernel/setup.c | 2
-rw-r--r--  arch/i386/kernel/time.c | 13
-rw-r--r--  arch/i386/kernel/timers/timer_hpet.c | 4
-rw-r--r--  arch/i386/kernel/traps.c | 16
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/i386/mach-default/topology.c | 4
-rw-r--r--  arch/i386/mm/discontig.c | 8
-rw-r--r--  arch/i386/mm/fault.c | 4
-rw-r--r--  arch/i386/mm/init.c | 2
-rw-r--r--  arch/i386/oprofile/init.c | 12
-rw-r--r--  arch/i386/oprofile/nmi_int.c | 4
-rw-r--r--  arch/i386/oprofile/nmi_timer_int.c | 2
-rw-r--r--  arch/ia64/Kconfig | 5
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 31
-rw-r--r--  arch/ia64/kernel/Makefile | 2
-rw-r--r--  arch/ia64/kernel/domain.c | 396
-rw-r--r--  arch/ia64/kernel/irq.c | 39
-rw-r--r--  arch/ia64/kernel/jprobes.S | 1
-rw-r--r--  arch/ia64/kernel/kprobes.c | 124
-rw-r--r--  arch/ia64/kernel/traps.c | 5
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/ia64/lib/flush.S | 1
-rw-r--r--  arch/ia64/mm/fault.c | 3
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 2
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c | 2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 8
-rw-r--r--  arch/m32r/kernel/time.c | 7
-rw-r--r--  arch/m68k/bvme6000/rtc.c | 5
-rw-r--r--  arch/m68k/kernel/time.c | 5
-rw-r--r--  arch/m68k/mvme16x/rtc.c | 4
-rw-r--r--  arch/m68knommu/kernel/time.c | 7
-rw-r--r--  arch/mips/kernel/linux32.c | 16
-rw-r--r--  arch/mips/kernel/sysirix.c | 5
-rw-r--r--  arch/mips/kernel/time.c | 7
-rw-r--r--  arch/mips/sgi-ip27/ip27-timer.c | 2
-rw-r--r--  arch/parisc/kernel/time.c | 5
-rw-r--r--  arch/ppc/kernel/time.c | 7
-rw-r--r--  arch/ppc/syslib/ocp.c | 2
-rw-r--r--  arch/ppc64/kernel/kprobes.c | 40
-rw-r--r--  arch/ppc64/kernel/misc.S | 6
-rw-r--r--  arch/ppc64/kernel/pSeries_reconfig.c | 2
-rw-r--r--  arch/ppc64/kernel/sys_ppc32.c | 31
-rw-r--r--  arch/ppc64/kernel/time.c | 7
-rw-r--r--  arch/ppc64/kernel/traps.c | 5
-rw-r--r--  arch/ppc64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/ppc64/mm/fault.c | 5
-rw-r--r--  arch/s390/kernel/time.c | 5
-rw-r--r--  arch/sh/kernel/time.c | 11
-rw-r--r--  arch/sh64/kernel/time.c | 11
-rw-r--r--  arch/sparc/kernel/pcic.c | 5
-rw-r--r--  arch/sparc/kernel/time.c | 7
-rw-r--r--  arch/sparc64/kernel/kprobes.c | 36
-rw-r--r--  arch/sparc64/kernel/sys_sparc32.c | 24
-rw-r--r--  arch/sparc64/kernel/time.c | 2
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/sparc64/mm/fault.c | 8
-rw-r--r--  arch/sparc64/mm/init.c | 3
-rw-r--r--  arch/sparc64/mm/ultra.S | 2
-rw-r--r--  arch/um/include/um_uaccess.h | 7
-rw-r--r--  arch/v850/kernel/time.c | 7
-rw-r--r--  arch/x86_64/Kconfig | 5
-rw-r--r--  arch/x86_64/boot/setup.S | 2
-rw-r--r--  arch/x86_64/boot/tools/build.c | 4
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c | 26
-rw-r--r--  arch/x86_64/kernel/e820.c | 2
-rw-r--r--  arch/x86_64/kernel/entry.S | 12
-rw-r--r--  arch/x86_64/kernel/genapic.c | 2
-rw-r--r--  arch/x86_64/kernel/genapic_cluster.c | 6
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 112
-rw-r--r--  arch/x86_64/kernel/kprobes.c | 41
-rw-r--r--  arch/x86_64/kernel/nmi.c | 6
-rw-r--r--  arch/x86_64/kernel/process.c | 1
-rw-r--r--  arch/x86_64/kernel/setup.c | 2
-rw-r--r--  arch/x86_64/kernel/setup64.c | 2
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 10
-rw-r--r--  arch/x86_64/kernel/time.c | 8
-rw-r--r--  arch/x86_64/kernel/traps.c | 14
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/x86_64/mm/fault.c | 4
-rw-r--r--  arch/x86_64/mm/numa.c | 6
-rw-r--r--  arch/xtensa/kernel/time.c | 7
102 files changed, 1067 insertions, 1057 deletions
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 8226c5cd788c..67be50b7d80a 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -149,7 +149,7 @@ irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if ((time_status & STA_UNSYNC) == 0
+ if (ntp_synced()
&& xtime.tv_sec > state.last_rtc_update + 660
&& xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
&& xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -502,10 +502,7 @@ do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 8880482dcbff..69449a818dcc 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -102,7 +102,7 @@ static unsigned long next_rtc_update;
*/
static inline void do_set_rtc(void)
{
- if (time_status & STA_UNSYNC || set_rtc == NULL)
+ if (!ntp_synced() || set_rtc == NULL)
return;
if (next_rtc_update &&
@@ -292,10 +292,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
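
The same conversion repeats across every architecture in this series: the open-coded NTP state reset in do_settimeofday() collapses into ntp_clear(), and the STA_UNSYNC test becomes ntp_synced(). For reference, a minimal sketch of what the two helpers presumably look like (the real definitions live in include/linux/timex.h; the exact form here is an assumption reconstructed from the lines being removed):

    /* sketch only -- assumed shape of the helpers this patch relies on */
    #define ntp_clear()                                             \
    do {                                                            \
            time_adjust = 0;        /* stop active adjtime() */     \
            time_status |= STA_UNSYNC;                              \
            time_maxerror = NTP_PHASE_LIMIT;                        \
            time_esterror = NTP_PHASE_LIMIT;                        \
    } while (0)

    #define ntp_synced()    (!(time_status & STA_UNSYNC))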
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index efc2f657184e..33dae99ec2d8 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_PXA27x) += pxa27x.o
obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
-obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o ssp.o
+obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o ssp.o
obj-$(CONFIG_MACH_POODLE) += poodle.o
# Support for blinky lights
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 06ea730e8675..29185acdd9e1 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -39,7 +39,6 @@
#include <asm/mach/sharpsl_param.h>
#include <asm/hardware/scoop.h>
-#include <video/w100fb.h>
#include "generic.h"
@@ -87,7 +86,7 @@ struct platform_device corgiscoop_device = {
* also use scoop functions and this makes the power up/down order
* work correctly.
*/
-static struct platform_device corgissp_device = {
+struct platform_device corgissp_device = {
.name = "corgi-ssp",
.dev = {
.parent = &corgiscoop_device.dev,
@@ -97,41 +96,33 @@ static struct platform_device corgissp_device = {
/*
- * Corgi w100 Frame Buffer Device
+ * Corgi Backlight Device
*/
-static struct w100fb_mach_info corgi_fb_info = {
- .w100fb_ssp_send = corgi_ssp_lcdtg_send,
- .comadj = -1,
- .phadadj = -1,
-};
-
-static struct resource corgi_fb_resources[] = {
- [0] = {
- .start = 0x08000000,
- .end = 0x08ffffff,
- .flags = IORESOURCE_MEM,
+static struct platform_device corgibl_device = {
+ .name = "corgi-bl",
+ .dev = {
+ .parent = &corgifb_device.dev,
},
+ .id = -1,
};
-static struct platform_device corgifb_device = {
- .name = "w100fb",
+
+/*
+ * Corgi Keyboard Device
+ */
+static struct platform_device corgikbd_device = {
+ .name = "corgi-keyboard",
.id = -1,
- .dev = {
- .platform_data = &corgi_fb_info,
- .parent = &corgissp_device.dev,
- },
- .num_resources = ARRAY_SIZE(corgi_fb_resources),
- .resource = corgi_fb_resources,
};
/*
- * Corgi Backlight Device
+ * Corgi Touch Screen Device
*/
-static struct platform_device corgibl_device = {
- .name = "corgi-bl",
+static struct platform_device corgits_device = {
+ .name = "corgi-ts",
.dev = {
- .parent = &corgifb_device.dev,
+ .parent = &corgissp_device.dev,
},
.id = -1,
};
@@ -199,6 +190,11 @@ static void corgi_mci_setpower(struct device *dev, unsigned int vdd)
}
}
+static int corgi_mci_get_ro(struct device *dev)
+{
+ return GPLR(CORGI_GPIO_nSD_WP) & GPIO_bit(CORGI_GPIO_nSD_WP);
+}
+
static void corgi_mci_exit(struct device *dev, void *data)
{
free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data);
@@ -208,11 +204,13 @@ static void corgi_mci_exit(struct device *dev, void *data)
static struct pxamci_platform_data corgi_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.init = corgi_mci_init,
+ .get_ro = corgi_mci_get_ro,
.setpower = corgi_mci_setpower,
.exit = corgi_mci_exit,
};
+
/*
* USB Device Controller
*/
@@ -238,14 +236,13 @@ static struct platform_device *devices[] __initdata = {
&corgiscoop_device,
&corgissp_device,
&corgifb_device,
+ &corgikbd_device,
&corgibl_device,
+ &corgits_device,
};
static void __init corgi_init(void)
{
- corgi_fb_info.comadj=sharpsl_param.comadj;
- corgi_fb_info.phadadj=sharpsl_param.phadadj;
-
pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT);
pxa_set_udc_info(&udc_info);
pxa_set_mci_info(&corgi_mci_platform_data);
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
new file mode 100644
index 000000000000..deac29c00290
--- /dev/null
+++ b/arch/arm/mach-pxa/corgi_lcd.c
@@ -0,0 +1,396 @@
+/*
+ * linux/drivers/video/w100fb.c
+ *
+ * Corgi LCD Specific Code for ATI Imageon w100 (Wallaby)
+ *
+ * Copyright (C) 2005 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <asm/arch/corgi.h>
+#include <asm/mach/sharpsl_param.h>
+#include <video/w100fb.h>
+
+/* Register Addresses */
+#define RESCTL_ADRS 0x00
+#define PHACTRL_ADRS 0x01
+#define DUTYCTRL_ADRS 0x02
+#define POWERREG0_ADRS 0x03
+#define POWERREG1_ADRS 0x04
+#define GPOR3_ADRS 0x05
+#define PICTRL_ADRS 0x06
+#define POLCTRL_ADRS 0x07
+
+/* Resgister Bit Definitions */
+#define RESCTL_QVGA 0x01
+#define RESCTL_VGA 0x00
+
+#define POWER1_VW_ON 0x01 /* VW Supply FET ON */
+#define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */
+#define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */
+
+#define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */
+#define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */
+#define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */
+
+#define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */
+#define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */
+#define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */
+#define POWER0_COM_ON 0x08 /* COM Powewr Supply ON */
+#define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */
+
+#define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */
+#define POWER0_COM_OFF 0x00 /* COM Powewr Supply OFF */
+#define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */
+
+#define PICTRL_INIT_STATE 0x01
+#define PICTRL_INIOFF 0x02
+#define PICTRL_POWER_DOWN 0x04
+#define PICTRL_COM_SIGNAL_OFF 0x08
+#define PICTRL_DAC_SIGNAL_OFF 0x10
+
+#define POLCTRL_SYNC_POL_FALL 0x01
+#define POLCTRL_EN_POL_FALL 0x02
+#define POLCTRL_DATA_POL_FALL 0x04
+#define POLCTRL_SYNC_ACT_H 0x08
+#define POLCTRL_EN_ACT_L 0x10
+
+#define POLCTRL_SYNC_POL_RISE 0x00
+#define POLCTRL_EN_POL_RISE 0x00
+#define POLCTRL_DATA_POL_RISE 0x00
+#define POLCTRL_SYNC_ACT_L 0x00
+#define POLCTRL_EN_ACT_H 0x00
+
+#define PHACTRL_PHASE_MANUAL 0x01
+#define DEFAULT_PHAD_QVGA (9)
+#define DEFAULT_COMADJ (125)
+
+/*
+ * This is only a psuedo I2C interface. We can't use the standard kernel
+ * routines as the interface is write only. We just assume the data is acked...
+ */
+static void lcdtg_ssp_i2c_send(u8 data)
+{
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, data);
+ udelay(10);
+}
+
+static void lcdtg_i2c_send_bit(u8 data)
+{
+ lcdtg_ssp_i2c_send(data);
+ lcdtg_ssp_i2c_send(data | POWER0_COM_DCLK);
+ lcdtg_ssp_i2c_send(data);
+}
+
+static void lcdtg_i2c_send_start(u8 base)
+{
+ lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
+ lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
+ lcdtg_ssp_i2c_send(base);
+}
+
+static void lcdtg_i2c_send_stop(u8 base)
+{
+ lcdtg_ssp_i2c_send(base);
+ lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
+ lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
+}
+
+static void lcdtg_i2c_send_byte(u8 base, u8 data)
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ if (data & 0x80)
+ lcdtg_i2c_send_bit(base | POWER0_COM_DOUT);
+ else
+ lcdtg_i2c_send_bit(base);
+ data <<= 1;
+ }
+}
+
+static void lcdtg_i2c_wait_ack(u8 base)
+{
+ lcdtg_i2c_send_bit(base);
+}
+
+static void lcdtg_set_common_voltage(u8 base_data, u8 data)
+{
+ /* Set Common Voltage to M62332FP via I2C */
+ lcdtg_i2c_send_start(base_data);
+ lcdtg_i2c_send_byte(base_data, 0x9c);
+ lcdtg_i2c_wait_ack(base_data);
+ lcdtg_i2c_send_byte(base_data, 0x00);
+ lcdtg_i2c_wait_ack(base_data);
+ lcdtg_i2c_send_byte(base_data, data);
+ lcdtg_i2c_wait_ack(base_data);
+ lcdtg_i2c_send_stop(base_data);
+}
+
+/* Set Phase Adjuct */
+static void lcdtg_set_phadadj(struct w100fb_par *par)
+{
+ int adj;
+ switch(par->xres) {
+ case 480:
+ case 640:
+ /* Setting for VGA */
+ adj = sharpsl_param.phadadj;
+ if (adj < 0) {
+ adj = PHACTRL_PHASE_MANUAL;
+ } else {
+ adj = ((adj & 0x0f) << 1) | PHACTRL_PHASE_MANUAL;
+ }
+ break;
+ case 240:
+ case 320:
+ default:
+ /* Setting for QVGA */
+ adj = (DEFAULT_PHAD_QVGA << 1) | PHACTRL_PHASE_MANUAL;
+ break;
+ }
+
+ corgi_ssp_lcdtg_send(PHACTRL_ADRS, adj);
+}
+
+static int lcd_inited;
+
+static void lcdtg_hw_init(struct w100fb_par *par)
+{
+ if (!lcd_inited) {
+ int comadj;
+
+ /* Initialize Internal Logic & Port */
+ corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE
+ | PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF);
+
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF
+ | POWER0_COM_OFF | POWER0_VCC5_OFF);
+
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
+
+ /* VDD(+8V), SVSS(-4V) ON */
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
+ mdelay(3);
+
+ /* DAC ON */
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON
+ | POWER0_COM_OFF | POWER0_VCC5_OFF);
+
+ /* INIB = H, INI = L */
+ /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */
+ corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF);
+
+ /* Set Common Voltage */
+ comadj = sharpsl_param.comadj;
+ if (comadj < 0)
+ comadj = DEFAULT_COMADJ;
+ lcdtg_set_common_voltage((POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF), comadj);
+
+ /* VCC5 ON, DAC ON */
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON |
+ POWER0_COM_OFF | POWER0_VCC5_ON);
+
+ /* GVSS(-8V) ON, VDD ON */
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
+ mdelay(2);
+
+ /* COM SIGNAL ON (PICTL[3] = L) */
+ corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIT_STATE);
+
+ /* COM ON, DAC ON, VCC5_ON */
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON
+ | POWER0_COM_ON | POWER0_VCC5_ON);
+
+ /* VW ON, GVSS ON, VDD ON */
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_ON | POWER1_GVSS_ON | POWER1_VDD_ON);
+
+ /* Signals output enable */
+ corgi_ssp_lcdtg_send(PICTRL_ADRS, 0);
+
+ /* Set Phase Adjuct */
+ lcdtg_set_phadadj(par);
+
+ /* Initialize for Input Signals from ATI */
+ corgi_ssp_lcdtg_send(POLCTRL_ADRS, POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE
+ | POLCTRL_DATA_POL_RISE | POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H);
+ udelay(1000);
+
+ lcd_inited=1;
+ } else {
+ lcdtg_set_phadadj(par);
+ }
+
+ switch(par->xres) {
+ case 480:
+ case 640:
+ /* Set Lcd Resolution (VGA) */
+ corgi_ssp_lcdtg_send(RESCTL_ADRS, RESCTL_VGA);
+ break;
+ case 240:
+ case 320:
+ default:
+ /* Set Lcd Resolution (QVGA) */
+ corgi_ssp_lcdtg_send(RESCTL_ADRS, RESCTL_QVGA);
+ break;
+ }
+}
+
+static void lcdtg_suspend(struct w100fb_par *par)
+{
+ /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */
+ mdelay(34);
+
+ /* (1)VW OFF */
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
+
+ /* (2)COM OFF */
+ corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF);
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON);
+
+ /* (3)Set Common Voltage Bias 0V */
+ lcdtg_set_common_voltage(POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0);
+
+ /* (4)GVSS OFF */
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
+
+ /* (5)VCC5 OFF */
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF);
+
+ /* (6)Set PDWN, INIOFF, DACOFF */
+ corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF |
+ PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF);
+
+ /* (7)DAC OFF */
+ corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF);
+
+ /* (8)VDD OFF */
+ corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
+
+ lcd_inited = 0;
+}
+
+static struct w100_tg_info corgi_lcdtg_info = {
+ .change=lcdtg_hw_init,
+ .suspend=lcdtg_suspend,
+ .resume=lcdtg_hw_init,
+};
+
+/*
+ * Corgi w100 Frame Buffer Device
+ */
+
+static struct w100_mem_info corgi_fb_mem = {
+ .ext_cntl = 0x00040003,
+ .sdram_mode_reg = 0x00650021,
+ .ext_timing_cntl = 0x10002a4a,
+ .io_cntl = 0x7ff87012,
+ .size = 0x1fffff,
+};
+
+static struct w100_gen_regs corgi_fb_regs = {
+ .lcd_format = 0x00000003,
+ .lcdd_cntl1 = 0x01CC0000,
+ .lcdd_cntl2 = 0x0003FFFF,
+ .genlcd_cntl1 = 0x00FFFF0D,
+ .genlcd_cntl2 = 0x003F3003,
+ .genlcd_cntl3 = 0x000102aa,
+};
+
+static struct w100_gpio_regs corgi_fb_gpio = {
+ .init_data1 = 0x000000bf,
+ .init_data2 = 0x00000000,
+ .gpio_dir1 = 0x00000000,
+ .gpio_oe1 = 0x03c0feff,
+ .gpio_dir2 = 0x00000000,
+ .gpio_oe2 = 0x00000000,
+};
+
+static struct w100_mode corgi_fb_modes[] = {
+{
+ .xres = 480,
+ .yres = 640,
+ .left_margin = 0x56,
+ .right_margin = 0x55,
+ .upper_margin = 0x03,
+ .lower_margin = 0x00,
+ .crtc_ss = 0x82360056,
+ .crtc_ls = 0xA0280000,
+ .crtc_gs = 0x80280028,
+ .crtc_vpos_gs = 0x02830002,
+ .crtc_rev = 0x00400008,
+ .crtc_dclk = 0xA0000000,
+ .crtc_gclk = 0x8015010F,
+ .crtc_goe = 0x80100110,
+ .crtc_ps1_active = 0x41060010,
+ .pll_freq = 75,
+ .fast_pll_freq = 100,
+ .sysclk_src = CLK_SRC_PLL,
+ .sysclk_divider = 0,
+ .pixclk_src = CLK_SRC_PLL,
+ .pixclk_divider = 2,
+ .pixclk_divider_rotated = 6,
+},{
+ .xres = 240,
+ .yres = 320,
+ .left_margin = 0x27,
+ .right_margin = 0x2e,
+ .upper_margin = 0x01,
+ .lower_margin = 0x00,
+ .crtc_ss = 0x81170027,
+ .crtc_ls = 0xA0140000,
+ .crtc_gs = 0xC0140014,
+ .crtc_vpos_gs = 0x00010141,
+ .crtc_rev = 0x00400008,
+ .crtc_dclk = 0xA0000000,
+ .crtc_gclk = 0x8015010F,
+ .crtc_goe = 0x80100110,
+ .crtc_ps1_active = 0x41060010,
+ .pll_freq = 0,
+ .fast_pll_freq = 0,
+ .sysclk_src = CLK_SRC_XTAL,
+ .sysclk_divider = 0,
+ .pixclk_src = CLK_SRC_XTAL,
+ .pixclk_divider = 1,
+ .pixclk_divider_rotated = 1,
+},
+
+};
+
+static struct w100fb_mach_info corgi_fb_info = {
+ .tg = &corgi_lcdtg_info,
+ .init_mode = INIT_MODE_ROTATED,
+ .mem = &corgi_fb_mem,
+ .regs = &corgi_fb_regs,
+ .modelist = &corgi_fb_modes[0],
+ .num_modes = 2,
+ .gpio = &corgi_fb_gpio,
+ .xtal_freq = 12500000,
+ .xtal_dbl = 0,
+};
+
+static struct resource corgi_fb_resources[] = {
+ [0] = {
+ .start = 0x08000000,
+ .end = 0x08ffffff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device corgifb_device = {
+ .name = "w100fb",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(corgi_fb_resources),
+ .resource = corgi_fb_resources,
+ .dev = {
+ .platform_data = &corgi_fb_info,
+ .parent = &corgissp_device.dev,
+ },
+
+};
diff --git a/arch/arm26/Makefile b/arch/arm26/Makefile
index ada8985530a5..e9cb8ef4f3fb 100644
--- a/arch/arm26/Makefile
+++ b/arch/arm26/Makefile
@@ -17,10 +17,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y)
CFLAGS +=-fno-omit-frame-pointer -mno-sched-prolog
endif
-ifeq ($(CONFIG_DEBUG_INFO),y)
-CFLAGS +=-g
-endif
-
CFLAGS_BOOT :=-mapcs-26 -mcpu=arm3 -msoft-float -Uarm
CFLAGS +=-mapcs-26 -mcpu=arm3 -msoft-float -Uarm
AFLAGS +=-mapcs-26 -mcpu=arm3 -msoft-float
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
index 549a6b2e177e..e66aedd02fad 100644
--- a/arch/arm26/kernel/time.c
+++ b/arch/arm26/kernel/time.c
@@ -114,7 +114,7 @@ static unsigned long next_rtc_update;
*/
static inline void do_set_rtc(void)
{
- if (time_status & STA_UNSYNC || set_rtc == NULL)
+ if (!ntp_synced() || set_rtc == NULL)
return;
//FIXME - timespec.tv_sec is a time_t not unsigned long
@@ -189,10 +189,7 @@ int do_settimeofday(struct timespec *tv)
xtime.tv_sec = tv->tv_sec;
xtime.tv_nsec = tv->tv_nsec;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 6b7b4e0802e3..dc3dfe9b4a1a 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -240,7 +240,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* The division here is not time critical since it will run once in
* 11 minutes
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index fa2d4323da25..a2d99b4aedcd 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -114,10 +114,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index 075db6644694..8d6558b00e44 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -85,7 +85,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2
@@ -216,10 +216,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 8a600218334d..af8c5d2057dd 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -116,10 +116,7 @@ int do_settimeofday(struct timespec *tv)
xtime.tv_sec = tv->tv_sec;
xtime.tv_nsec = tv->tv_nsec;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 3b3b017e1c15..4b7de3e1e57b 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1318,6 +1318,11 @@ config GENERIC_IRQ_PROBE
bool
default y
+config GENERIC_PENDING_IRQ
+ bool
+ depends on GENERIC_HARDIRQS && SMP
+ default y
+
config X86_SMP
bool
depends on SMP && !X86_VOYAGER
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index 8cb420f40c58..ca668d9df164 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -82,7 +82,7 @@ start:
# This is the setup header, and it must start at %cs:2 (old 0x9020:2)
.ascii "HdrS" # header signature
- .word 0x0203 # header version number (>= 0x0105)
+ .word 0x0204 # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
start_sys_seg: .word SYSSEG
diff --git a/arch/i386/boot/tools/build.c b/arch/i386/boot/tools/build.c
index 6835f6d47c31..05798419a6a9 100644
--- a/arch/i386/boot/tools/build.c
+++ b/arch/i386/boot/tools/build.c
@@ -177,7 +177,9 @@ int main(int argc, char ** argv)
die("Output: seek failed");
buf[0] = (sys_size & 0xff);
buf[1] = ((sys_size >> 8) & 0xff);
- if (write(1, buf, 2) != 2)
+ buf[2] = ((sys_size >> 16) & 0xff);
+ buf[3] = ((sys_size >> 24) & 0xff);
+ if (write(1, buf, 4) != 4)
die("Write of image length failed");
return 0; /* Everything is OK */
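
The setup.S and build.c hunks belong together: bumping the header to 0x0204 advertises boot protocol 2.04, whose defining change is widening the syssize field from two bytes to four. syssize counts 16-byte paragraphs, so the old 16-bit field capped the protected-mode image at 0xffff * 16 bytes (just under 1 MiB). A sketch of the consumer side, reassembling the little-endian value that build.c now emits (buf here is a hypothetical bootloader buffer holding those four header bytes):

    /* bootloader-side sketch: decode the widened syssize field */
    unsigned long syssize = buf[0] | (buf[1] << 8) | (buf[2] << 16) |
                            ((unsigned long)buf[3] << 24);
    unsigned long prot_mode_bytes = syssize * 16;  /* field is in 16-byte paragraphs */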
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index a3cdf894302b..58516e2ac172 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -6,32 +6,28 @@
#include <linux/bootmem.h>
-struct dmi_header {
- u8 type;
- u8 length;
- u16 handle;
-};
-
-#undef DMI_DEBUG
-
-#ifdef DMI_DEBUG
-#define dmi_printk(x) printk x
-#else
-#define dmi_printk(x)
-#endif
-
static char * __init dmi_string(struct dmi_header *dm, u8 s)
{
u8 *bp = ((u8 *) dm) + dm->length;
+ char *str = "";
- if (!s)
- return "";
- s--;
- while (s > 0 && *bp) {
- bp += strlen(bp) + 1;
+ if (s) {
s--;
- }
- return bp;
+ while (s > 0 && *bp) {
+ bp += strlen(bp) + 1;
+ s--;
+ }
+
+ if (*bp != 0) {
+ str = alloc_bootmem(strlen(bp) + 1);
+ if (str != NULL)
+ strcpy(str, bp);
+ else
+ printk(KERN_ERR "dmi_string: out of memory.\n");
+ }
+ }
+
+ return str;
}
/*
@@ -84,69 +80,76 @@ static int __init dmi_checksum(u8 *buf)
return sum == 0;
}
-static int __init dmi_iterate(void (*decode)(struct dmi_header *))
+static char *dmi_ident[DMI_STRING_MAX];
+static LIST_HEAD(dmi_devices);
+
+/*
+ * Save a DMI string
+ */
+static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
{
- u8 buf[15];
- char __iomem *p, *q;
+ char *p, *d = (char*) dm;
- /*
- * no iounmap() for that ioremap(); it would be a no-op, but it's
- * so early in setup that sucker gets confused into doing what
- * it shouldn't if we actually call it.
- */
- p = ioremap(0xF0000, 0x10000);
+ if (dmi_ident[slot])
+ return;
+
+ p = dmi_string(dm, d[string]);
if (p == NULL)
- return -1;
+ return;
- for (q = p; q < p + 0x10000; q += 16) {
- memcpy_fromio(buf, q, 15);
- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
- u16 num = (buf[13] << 8) | buf[12];
- u16 len = (buf[7] << 8) | buf[6];
- u32 base = (buf[11] << 24) | (buf[10] << 16) |
- (buf[9] << 8) | buf[8];
+ dmi_ident[slot] = p;
+}
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we don't know at this point.
- */
- if (buf[14] != 0)
- printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14] >> 4, buf[14] & 0xF);
- else
- printk(KERN_INFO "DMI present.\n");
+static void __init dmi_save_devices(struct dmi_header *dm)
+{
+ int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
+ struct dmi_device *dev;
+
+ for (i = 0; i < count; i++) {
+ char *d = ((char *) dm) + (i * 2);
- dmi_printk((KERN_INFO "%d structures occupying %d bytes.\n",
- num, len));
- dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", base));
+ /* Skip disabled device */
+ if ((*d & 0x80) == 0)
+ continue;
- if (dmi_table(base,len, num, decode) == 0)
- return 0;
+ dev = alloc_bootmem(sizeof(*dev));
+ if (!dev) {
+ printk(KERN_ERR "dmi_save_devices: out of memory.\n");
+ break;
}
+
+ dev->type = *d++ & 0x7f;
+ dev->name = dmi_string(dm, *d);
+ dev->device_data = NULL;
+
+ list_add(&dev->list, &dmi_devices);
}
- return -1;
}
-static char *dmi_ident[DMI_STRING_MAX];
-
-/*
- * Save a DMI string
- */
-static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
+static void __init dmi_save_ipmi_device(struct dmi_header *dm)
{
- char *d = (char*)dm;
- char *p = dmi_string(dm, d[string]);
+ struct dmi_device *dev;
+ void * data;
- if (p == NULL || *p == 0)
+ data = alloc_bootmem(dm->length);
+ if (data == NULL) {
+ printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
return;
- if (dmi_ident[slot])
+ }
+
+ memcpy(data, dm, dm->length);
+
+ dev = alloc_bootmem(sizeof(*dev));
+ if (!dev) {
+ printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
return;
+ }
- dmi_ident[slot] = alloc_bootmem(strlen(p) + 1);
- if(dmi_ident[slot])
- strcpy(dmi_ident[slot], p);
- else
- printk(KERN_ERR "dmi_save_ident: out of memory.\n");
+ dev->type = DMI_DEV_TYPE_IPMI;
+ dev->name = "IPMI controller";
+ dev->device_data = data;
+
+ list_add(&dev->list, &dmi_devices);
}
/*
@@ -156,42 +159,69 @@ static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
*/
static void __init dmi_decode(struct dmi_header *dm)
{
- u8 *data __attribute__((__unused__)) = (u8 *)dm;
-
switch(dm->type) {
- case 0:
- dmi_printk(("BIOS Vendor: %s\n", dmi_string(dm, data[4])));
+ case 0: /* BIOS Information */
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
- dmi_printk(("BIOS Version: %s\n", dmi_string(dm, data[5])));
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
- dmi_printk(("BIOS Release: %s\n", dmi_string(dm, data[8])));
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
break;
- case 1:
- dmi_printk(("System Vendor: %s\n", dmi_string(dm, data[4])));
+ case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
- dmi_printk(("Product Name: %s\n", dmi_string(dm, data[5])));
dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
- dmi_printk(("Version: %s\n", dmi_string(dm, data[6])));
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
- dmi_printk(("Serial Number: %s\n", dmi_string(dm, data[7])));
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
break;
- case 2:
- dmi_printk(("Board Vendor: %s\n", dmi_string(dm, data[4])));
+ case 2: /* Base Board Information */
dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
- dmi_printk(("Board Name: %s\n", dmi_string(dm, data[5])));
dmi_save_ident(dm, DMI_BOARD_NAME, 5);
- dmi_printk(("Board Version: %s\n", dmi_string(dm, data[6])));
dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
break;
+ case 10: /* Onboard Devices Information */
+ dmi_save_devices(dm);
+ break;
+ case 38: /* IPMI Device Information */
+ dmi_save_ipmi_device(dm);
}
}
void __init dmi_scan_machine(void)
{
- if (dmi_iterate(dmi_decode))
- printk(KERN_INFO "DMI not present.\n");
+ u8 buf[15];
+ char __iomem *p, *q;
+
+ /*
+ * no iounmap() for that ioremap(); it would be a no-op, but it's
+ * so early in setup that sucker gets confused into doing what
+ * it shouldn't if we actually call it.
+ */
+ p = ioremap(0xF0000, 0x10000);
+ if (p == NULL)
+ goto out;
+
+ for (q = p; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf, q, 15);
+ if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+ u16 num = (buf[13] << 8) | buf[12];
+ u16 len = (buf[7] << 8) | buf[6];
+ u32 base = (buf[11] << 24) | (buf[10] << 16) |
+ (buf[9] << 8) | buf[8];
+
+ /*
+ * DMI version 0.0 means that the real version is taken from
+ * the SMBIOS version, which we don't know at this point.
+ */
+ if (buf[14] != 0)
+ printk(KERN_INFO "DMI %d.%d present.\n",
+ buf[14] >> 4, buf[14] & 0xF);
+ else
+ printk(KERN_INFO "DMI present.\n");
+
+ if (dmi_table(base,len, num, dmi_decode) == 0)
+ return;
+ }
+ }
+
+out: printk(KERN_INFO "DMI not present.\n");
}
@@ -218,9 +248,9 @@ int dmi_check_system(struct dmi_system_id *list)
/* No match */
goto fail;
}
+ count++;
if (d->callback && d->callback(d))
break;
- count++;
fail: d++;
}
@@ -240,3 +270,32 @@ char *dmi_get_system_info(int field)
return dmi_ident[field];
}
EXPORT_SYMBOL(dmi_get_system_info);
+
+/**
+ * dmi_find_device - find onboard device by type/name
+ * @type: device type or %DMI_DEV_TYPE_ANY to match all device types
+ * @desc: device name string or %NULL to match all
+ * @from: previous device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known onboard devices. If a device is
+ * found with a matching @vendor and @device, a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL to the @from argument.
+ * If @from is not %NULL, searches continue from next device.
+ */
+struct dmi_device * dmi_find_device(int type, const char *name,
+ struct dmi_device *from)
+{
+ struct list_head *d, *head = from ? &from->list : &dmi_devices;
+
+ for(d = head->next; d != &dmi_devices; d = d->next) {
+ struct dmi_device *dev = list_entry(d, struct dmi_device, list);
+
+ if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
+ ((name == NULL) || (strcmp(dev->name, name) == 0)))
+ return dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(dmi_find_device);
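
A minimal usage sketch for the new dmi_find_device() iterator, following the signature above; the IPMI case mirrors what dmi_save_ipmi_device() stores, and the surrounding driver code is hypothetical:

    /* walk every IPMI record the DMI scan saved */
    struct dmi_device *dev = NULL;

    while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)) != NULL) {
            struct dmi_header *dm = dev->device_data;  /* copy of the type-38 record */
            /* parse dm->length bytes of IPMI Device Information here */
    }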
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index abb909793efc..3aad03839660 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -507,7 +507,7 @@ label: \
pushl $__KERNEL_CS; \
pushl $sysenter_past_esp
-ENTRY(debug)
+KPROBE_ENTRY(debug)
cmpl $sysenter_entry,(%esp)
jne debug_stack_correct
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
@@ -518,7 +518,7 @@ debug_stack_correct:
movl %esp,%eax # pt_regs pointer
call do_debug
jmp ret_from_exception
-
+ .previous .text
/*
* NMI is doubly nasty. It can happen _while_ we're handling
* a debug fault, and the debug fault hasn't yet been able to
@@ -591,13 +591,14 @@ nmi_16bit_stack:
.long 1b,iret_exc
.previous
-ENTRY(int3)
+KPROBE_ENTRY(int3)
pushl $-1 # mark this as an int
SAVE_ALL
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_int3
jmp ret_from_exception
+ .previous .text
ENTRY(overflow)
pushl $0
@@ -631,17 +632,19 @@ ENTRY(stack_segment)
pushl $do_stack_segment
jmp error_code
-ENTRY(general_protection)
+KPROBE_ENTRY(general_protection)
pushl $do_general_protection
jmp error_code
+ .previous .text
ENTRY(alignment_check)
pushl $do_alignment_check
jmp error_code
-ENTRY(page_fault)
+KPROBE_ENTRY(page_fault)
pushl $do_page_fault
jmp error_code
+ .previous .text
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
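
The KPROBE_ENTRY()/.previous pairs above move the debug, int3, general_protection and page_fault stubs into a dedicated .kprobes.text section, which the kprobes core can then refuse to place probes in (the matching KPROBES_TEXT output-section macro is added to vmlinux.lds.S later in this diff). A sketch of the presumed macro; the exact form is an assumption:

    /* assumed definition, along the lines of include/linux/linkage.h */
    #define KPROBE_ENTRY(name)                      \
            .section .kprobes.text, "ax";           \
            ENTRY(name)

    /* ".previous .text" at the end of each stub switches assembly output
     * back to the regular .text section. */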
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 6578f40bd501..0e727e6da5c9 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
+
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
@@ -77,7 +78,7 @@ static struct irq_pin_list {
int apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
-int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
#ifdef CONFIG_PCI_MSI
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
@@ -222,13 +223,21 @@ static void clear_IO_APIC (void)
clear_IO_APIC_pin(apic, pin);
}
+#ifdef CONFIG_SMP
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
{
unsigned long flags;
int pin;
struct irq_pin_list *entry = irq_2_pin + irq;
unsigned int apicid_value;
+ cpumask_t tmp;
+ cpus_and(tmp, cpumask, cpu_online_map);
+ if (cpus_empty(tmp))
+ tmp = TARGET_CPUS;
+
+ cpus_and(cpumask, tmp, CPU_MASK_ALL);
+
apicid_value = cpu_mask_to_apicid(cpumask);
/* Prepare to do the io_apic_write */
apicid_value = apicid_value << 24;
@@ -242,6 +251,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
break;
entry = irq_2_pin + entry->next;
}
+ set_irq_info(irq, cpumask);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -259,7 +269,6 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
# define Dprintk(x...)
# endif
-cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
#define IRQBALANCE_CHECK_ARCH -999
static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
@@ -328,12 +337,7 @@ static inline void balance_irq(int cpu, int irq)
cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
new_cpu = move(cpu, allowed_mask, now, 1);
if (cpu != new_cpu) {
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
- spin_unlock_irqrestore(&desc->lock, flags);
+ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
}
}
@@ -528,16 +532,12 @@ tryanotherirq:
cpus_and(tmp, target_cpu_mask, allowed_mask);
if (!cpus_empty(tmp)) {
- irq_desc_t *desc = irq_desc + selected_irq;
- unsigned long flags;
Dprintk("irq = %d moved to cpu = %d\n",
selected_irq, min_loaded);
/* mark for change destination */
- spin_lock_irqsave(&desc->lock, flags);
- pending_irq_balance_cpumask[selected_irq] =
- cpumask_of_cpu(min_loaded);
- spin_unlock_irqrestore(&desc->lock, flags);
+ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
+
/* Since we made a change, come back sooner to
* check for more variation.
*/
@@ -568,7 +568,8 @@ static int balanced_irq(void *unused)
/* push everything to CPU 0 to give us a starting point. */
for (i = 0 ; i < NR_IRQS ; i++) {
- pending_irq_balance_cpumask[i] = cpumask_of_cpu(0);
+ pending_irq_cpumask[i] = cpumask_of_cpu(0);
+ set_pending_irq(i, cpumask_of_cpu(0));
}
for ( ; ; ) {
@@ -647,20 +648,9 @@ int __init irqbalance_disable(char *str)
__setup("noirqbalance", irqbalance_disable);
-static inline void move_irq(int irq)
-{
- /* note - we hold the desc->lock */
- if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
- set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
- cpus_clear(pending_irq_balance_cpumask[irq]);
- }
-}
-
late_initcall(balanced_irq_init);
-
-#else /* !CONFIG_IRQBALANCE */
-static inline void move_irq(int irq) { }
#endif /* CONFIG_IRQBALANCE */
+#endif /* CONFIG_SMP */
#ifndef CONFIG_SMP
void fastcall send_IPI_self(int vector)
@@ -820,6 +810,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
* we need to reprogram the ioredtbls to cater for the cpus which have come online
* so mask in all cases should simply be TARGET_CPUS
*/
+#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
@@ -838,6 +829,7 @@ void __init setup_ioapic_dest(void)
}
}
+#endif
/*
* EISA Edge/Level control register, ELCR
@@ -1127,7 +1119,7 @@ static inline int IO_APIC_irq_trigger(int irq)
}
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq)
{
@@ -1249,6 +1241,7 @@ static void __init setup_IO_APIC_irqs(void)
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+ set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
}
@@ -1944,6 +1937,7 @@ static void ack_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
+ move_irq(vector);
ack_edge_ioapic_irq(irq);
}
@@ -1958,6 +1952,7 @@ static void end_level_ioapic_vector (unsigned int vector)
{
int irq = vector_to_irq(vector);
+ move_irq(vector);
end_level_ioapic_irq(irq);
}
@@ -1975,14 +1970,17 @@ static void unmask_IO_APIC_vector (unsigned int vector)
unmask_IO_APIC_irq(irq);
}
+#ifdef CONFIG_SMP
static void set_ioapic_affinity_vector (unsigned int vector,
cpumask_t cpu_mask)
{
int irq = vector_to_irq(vector);
+ set_native_irq_info(vector, cpu_mask);
set_ioapic_affinity_irq(irq, cpu_mask);
}
#endif
+#endif
/*
* Level and edge triggered IO-APIC interrupts need different handling,
@@ -1992,7 +1990,7 @@ static void set_ioapic_affinity_vector (unsigned int vector,
* edge-triggered handler, without risking IRQ storms and other ugly
* races.
*/
-static struct hw_interrupt_type ioapic_edge_type = {
+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
.typename = "IO-APIC-edge",
.startup = startup_edge_ioapic,
.shutdown = shutdown_edge_ioapic,
@@ -2000,10 +1998,12 @@ static struct hw_interrupt_type ioapic_edge_type = {
.disable = disable_edge_ioapic,
.ack = ack_edge_ioapic,
.end = end_edge_ioapic,
+#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
+#endif
};
-static struct hw_interrupt_type ioapic_level_type = {
+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
.typename = "IO-APIC-level",
.startup = startup_level_ioapic,
.shutdown = shutdown_level_ioapic,
@@ -2011,7 +2011,9 @@ static struct hw_interrupt_type ioapic_level_type = {
.disable = disable_level_ioapic,
.ack = mask_and_ack_level_ioapic,
.end = end_level_ioapic,
+#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
+#endif
};
static inline void init_IO_APIC_traps(void)
@@ -2074,7 +2076,7 @@ static void ack_lapic_irq (unsigned int irq)
static void end_lapic_irq (unsigned int i) { /* nothing */ }
-static struct hw_interrupt_type lapic_irq_type = {
+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
.typename = "local-APIC-edge",
.startup = NULL, /* startup_irq() not used for IRQ0 */
.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
@@ -2569,6 +2571,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index a6d8c45961d3..6345b430b105 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -62,32 +62,32 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
return 0;
}
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
return 0;
}
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
@@ -127,7 +127,8 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->eip = (unsigned long)&p->ainsn.insn;
}
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
{
unsigned long *sara = (unsigned long *)&regs->esp;
struct kretprobe_instance *ri;
@@ -150,7 +151,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled thorough out this function.
*/
-static int kprobe_handler(struct pt_regs *regs)
+static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
@@ -176,7 +177,8 @@ static int kprobe_handler(struct pt_regs *regs)
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if (kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
regs->eflags |= kprobe_saved_eflags;
unlock_kprobes();
@@ -220,7 +222,10 @@ static int kprobe_handler(struct pt_regs *regs)
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
*/
+ regs->eip -= sizeof(kprobe_opcode_t);
ret = 1;
}
/* Not one of ours: let kernel handle it */
@@ -259,7 +264,7 @@ no_kprobe:
/*
* Called when we hit the probe point at kretprobe_trampoline
*/
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
@@ -338,7 +343,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
*/
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
unsigned long *tos = (unsigned long *)&regs->esp;
unsigned long next_eip = 0;
@@ -444,8 +449,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
/*
* Wrapper routine to for handling exceptions.
*/
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
- void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
switch (val) {
@@ -473,7 +478,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
return NOTIFY_DONE;
}
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
@@ -495,7 +500,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
{
preempt_enable_no_resched();
asm volatile (" xchgl %%ebx,%%esp \n"
@@ -506,7 +511,7 @@ void jprobe_return(void)
(jprobe_saved_esp):"memory");
}
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
u8 *addr = (u8 *) (regs->eip - 1);
unsigned long stack_addr = (unsigned long)jprobe_saved_esp;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 8bbdbda07a2d..0178457db721 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -478,6 +478,11 @@ void touch_nmi_watchdog (void)
*/
for (i = 0; i < NR_CPUS; i++)
alert_counter[i] = 0;
+
+ /*
+ * Tickle the softlockup detector too:
+ */
+ touch_softlockup_watchdog();
}
extern void die_nmi(struct pt_regs *, const char *msg);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 294bcca985ab..e29fd5aeaf8e 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -82,7 +82,7 @@ EXPORT_SYMBOL(efi_enabled);
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
EXPORT_SYMBOL(boot_cpu_data);
unsigned long mmu_cr4_features;
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 6f794a78ee1e..eefea7c55008 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -194,10 +194,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
@@ -252,8 +249,7 @@ EXPORT_SYMBOL(profile_pc);
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
-static inline void do_timer_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
+static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
{
#ifdef CONFIG_X86_IO_APIC
if (timer_ack) {
@@ -307,7 +303,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
cur_timer->mark_offset();
- do_timer_interrupt(irq, NULL, regs);
+ do_timer_interrupt(irq, regs);
write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
@@ -348,7 +344,7 @@ static void sync_cmos_clock(unsigned long dummy)
* This code is run on a timer. If the clock is set, that timer
* may not expire at the correct time. Thus, we adjust...
*/
- if ((time_status & STA_UNSYNC) != 0)
+ if (!ntp_synced())
/*
* Not synced, exit, do not restart a timer (if one is
* running, let it run out).
@@ -422,6 +418,7 @@ static int timer_resume(struct sys_device *dev)
last_timer->resume();
cur_timer = last_timer;
last_timer = NULL;
+ touch_softlockup_watchdog();
return 0;
}
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index 001de97c9e4a..d973a8b681fd 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -18,8 +18,8 @@
#include "mach_timer.h"
#include <asm/hpet.h>
-static unsigned long __read_mostly hpet_usec_quotient; /* convert hpet clks to usec */
-static unsigned long tsc_hpet_quotient; /* convert tsc to hpet clks */
+static unsigned long hpet_usec_quotient __read_mostly; /* convert hpet clks to usec */
+static unsigned long tsc_hpet_quotient __read_mostly; /* convert tsc to hpet clks */
static unsigned long hpet_last; /* hpet counter value at last tick*/
static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
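
Several hunks in this patch only add or reposition __read_mostly annotations (boot_cpu_data, vector_irq, irq_vector, the hw_interrupt_type tables, node_data, and the two HPET quotients above, which are merely moved to the kernel's usual position after the variable name). The attribute groups rarely-written globals into a separate data section so they stop sharing cache lines with frequently written data. A sketch of the presumed definition; the exact section name is an assumption:

    /* assumed definition, roughly as in the x86 cache.h headers */
    #define __read_mostly __attribute__((__section__(".data.read_mostly")))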
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 54629bb5893a..09a58cb6daa7 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -363,8 +363,9 @@ static inline void die_if_kernel(const char * str, struct pt_regs * regs, long e
die(str, regs, err);
}
-static void do_trap(int trapnr, int signr, char *str, int vm86,
- struct pt_regs * regs, long error_code, siginfo_t *info)
+static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
+ struct pt_regs * regs, long error_code,
+ siginfo_t *info)
{
struct task_struct *tsk = current;
tsk->thread.error_code = error_code;
@@ -460,7 +461,8 @@ DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
-fastcall void do_general_protection(struct pt_regs * regs, long error_code)
+fastcall void __kprobes do_general_protection(struct pt_regs * regs,
+ long error_code)
{
int cpu = get_cpu();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
@@ -657,7 +659,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
++nmi_count(cpu);
- if (!nmi_callback(regs, cpu))
+ if (!rcu_dereference(nmi_callback)(regs, cpu))
default_do_nmi(regs);
nmi_exit();
@@ -665,7 +667,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
void set_nmi_callback(nmi_callback_t callback)
{
- nmi_callback = callback;
+ rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);
@@ -676,7 +678,7 @@ void unset_nmi_callback(void)
EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_KPROBES
-fastcall void do_int3(struct pt_regs *regs, long error_code)
+fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
== NOTIFY_STOP)
@@ -710,7 +712,7 @@ fastcall void do_int3(struct pt_regs *regs, long error_code)
* find every occurrence of the TF bit that could be saved away even
* by user code)
*/
-fastcall void do_debug(struct pt_regs * regs, long error_code)
+fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
unsigned int condition;
struct task_struct *tsk = current;
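
The do_nmi()/set_nmi_callback() hunk above converts the callback pointer to the rcu_assign_pointer()/rcu_dereference() pair, so an NMI racing with a callback update reads one coherent pointer value with the callee's initialization ordered before publication. A sketch of how a client would hook it under that scheme (my_nmi_handler and my_init are hypothetical names):

    /* hypothetical NMI hook, e.g. for a watchdog or profiler */
    static int my_nmi_handler(struct pt_regs *regs, int cpu)
    {
            /* return 1 to claim the NMI, 0 to fall back to default_do_nmi() */
            return 0;
    }

    static int __init my_init(void)
    {
            set_nmi_callback(my_nmi_handler);  /* publishes via rcu_assign_pointer() */
            return 0;
    }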
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 761972f8cb6c..13b9c62cbbb4 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -22,6 +22,7 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x9090
diff --git a/arch/i386/mach-default/topology.c b/arch/i386/mach-default/topology.c
index 23395fff35d1..b64314069e78 100644
--- a/arch/i386/mach-default/topology.c
+++ b/arch/i386/mach-default/topology.c
@@ -76,7 +76,7 @@ static int __init topology_init(void)
for_each_online_node(i)
arch_register_node(i);
- for_each_cpu(i)
+ for_each_present_cpu(i)
arch_register_cpu(i);
return 0;
}
@@ -87,7 +87,7 @@ static int __init topology_init(void)
{
int i;
- for_each_cpu(i)
+ for_each_present_cpu(i)
arch_register_cpu(i);
return 0;
}
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 6711ce3f6916..244d8ec66be2 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -37,7 +37,7 @@
#include <asm/mmzone.h>
#include <bios_ebda.h>
-struct pglist_data *node_data[MAX_NUMNODES];
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
bootmem_data_t node0_bdata;
@@ -49,8 +49,8 @@ bootmem_data_t node0_bdata;
* 2) node_start_pfn - the starting page frame number for a node
* 3) node_end_pfn - the ending page fram number for a node
*/
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
+unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
+unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
#ifdef CONFIG_DISCONTIGMEM
@@ -66,7 +66,7 @@ unsigned long node_end_pfn[MAX_NUMNODES];
* physnode_map[4-7] = 1;
* physnode_map[8- ] = -1;
*/
-s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
void memory_present(int nid, unsigned long start, unsigned long end)
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 411b8500ad1b..9edd4485b91e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -21,6 +21,7 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -223,7 +224,8 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long);
* bit 1 == 0 means read, 1 means write
* bit 2 == 0 means kernel, 1 means user-mode
*/
-fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long error_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 9edfc058b894..2ebaf75f732e 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -393,7 +393,7 @@ void zap_low_mappings (void)
}
static int disable_nx __initdata = 0;
-u64 __supported_pte_mask = ~_PAGE_NX;
+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
/*
* noexec = on|off
diff --git a/arch/i386/oprofile/init.c b/arch/i386/oprofile/init.c
index c90332de582b..5341d481d92f 100644
--- a/arch/i386/oprofile/init.c
+++ b/arch/i386/oprofile/init.c
@@ -15,9 +15,9 @@
* with the NMI mode driver.
*/
-extern int nmi_init(struct oprofile_operations * ops);
-extern int nmi_timer_init(struct oprofile_operations * ops);
-extern void nmi_exit(void);
+extern int op_nmi_init(struct oprofile_operations * ops);
+extern int op_nmi_timer_init(struct oprofile_operations * ops);
+extern void op_nmi_exit(void);
extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
@@ -28,11 +28,11 @@ int __init oprofile_arch_init(struct oprofile_operations * ops)
ret = -ENODEV;
#ifdef CONFIG_X86_LOCAL_APIC
- ret = nmi_init(ops);
+ ret = op_nmi_init(ops);
#endif
#ifdef CONFIG_X86_IO_APIC
if (ret < 0)
- ret = nmi_timer_init(ops);
+ ret = op_nmi_timer_init(ops);
#endif
ops->backtrace = x86_backtrace;
@@ -43,6 +43,6 @@ int __init oprofile_arch_init(struct oprofile_operations * ops)
void oprofile_arch_exit(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
- nmi_exit();
+ op_nmi_exit();
#endif
}
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 255e4702d185..0493e8b8ec49 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -355,7 +355,7 @@ static int __init ppro_init(char ** cpu_type)
/* in order to get driverfs right */
static int using_nmi;
-int __init nmi_init(struct oprofile_operations *ops)
+int __init op_nmi_init(struct oprofile_operations *ops)
{
__u8 vendor = boot_cpu_data.x86_vendor;
__u8 family = boot_cpu_data.x86;
@@ -420,7 +420,7 @@ int __init nmi_init(struct oprofile_operations *ops)
}
-void nmi_exit(void)
+void op_nmi_exit(void)
{
if (using_nmi)
exit_driverfs();
diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c
index c58d0c14f274..ad93cdd55d63 100644
--- a/arch/i386/oprofile/nmi_timer_int.c
+++ b/arch/i386/oprofile/nmi_timer_int.c
@@ -40,7 +40,7 @@ static void timer_stop(void)
}
-int __init nmi_timer_init(struct oprofile_operations * ops)
+int __init op_nmi_timer_init(struct oprofile_operations * ops)
{
extern int nmi_active;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3deced637f07..17b5dbf8c311 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -434,6 +434,11 @@ config GENERIC_IRQ_PROBE
bool
default y
+config GENERIC_PENDING_IRQ
+ bool
+ depends on GENERIC_HARDIRQS && SMP
+ default y
+
source "arch/ia64/hp/sim/Kconfig"
source "arch/ia64/oprofile/Kconfig"
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 829a6d80711c..0708edb06cc4 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -215,7 +215,7 @@ ia32_syscall_table:
data8 sys32_fork
data8 sys_read
data8 sys_write
- data8 sys32_open /* 5 */
+ data8 compat_sys_open /* 5 */
data8 sys_close
data8 sys32_waitpid
data8 sys_creat
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index c1e20d65dd6c..e29a8a55486a 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -2359,37 +2359,6 @@ sys32_brk (unsigned int brk)
return ret;
}
-/*
- * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
- */
-asmlinkage long
-sys32_open (const char __user * filename, int flags, int mode)
-{
- char * tmp;
- int fd, error;
-
- tmp = getname(filename);
- fd = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- fd = get_unused_fd();
- if (fd >= 0) {
- struct file *f = filp_open(tmp, flags, mode);
- error = PTR_ERR(f);
- if (IS_ERR(f))
- goto out_error;
- fd_install(fd, f);
- }
-out:
- putname(tmp);
- }
- return fd;
-
-out_error:
- put_unused_fd(fd);
- fd = error;
- goto out;
-}
-
/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
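Note: the private sys32_open() removed above (and its ppc64 and sparc64 twins later in this diff) duplicated fs/open.c only to avoid forcing O_LARGEFILE; callers are switched to the common compat path instead, compat_sys_open in the ia32 syscall table above and do_sys_open() in the sparc32 wrapper below. A hedged sketch of what such a wrapper reduces to, mirroring the sparc32_open() replacement; the function name is hypothetical and the do_sys_open() declaration shape is assumed from the call shown later:

extern long do_sys_open(const char __user *filename, int flags, int mode);

asmlinkage long example_compat_open(const char __user *filename,
				    int flags, int mode)
{
	/* no forced O_LARGEFILE for 32-bit callers */
	return do_sys_open(filename, flags, mode);
}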
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b242594be55b..307514f7a282 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
+obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
deleted file mode 100644
index bbb8efe126b7..000000000000
--- a/arch/ia64/kernel/domain.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * arch/ia64/kernel/domain.c
- * Architecture specific sched-domains builder.
- *
- * Copyright (C) 2004 Jesse Barnes
- * Copyright (C) 2004 Silicon Graphics, Inc.
- */
-
-#include <linux/sched.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/topology.h>
-#include <linux/nodemask.h>
-
-#define SD_NODES_PER_DOMAIN 16
-
-#ifdef CONFIG_NUMA
-/**
- * find_next_best_node - find the next node to include in a sched_domain
- * @node: node whose sched_domain we're building
- * @used_nodes: nodes already in the sched_domain
- *
- * Find the next node to include in a given scheduling domain. Simply
- * finds the closest node not already in the @used_nodes map.
- *
- * Should use nodemask_t.
- */
-static int find_next_best_node(int node, unsigned long *used_nodes)
-{
- int i, n, val, min_val, best_node = 0;
-
- min_val = INT_MAX;
-
- for (i = 0; i < MAX_NUMNODES; i++) {
- /* Start at @node */
- n = (node + i) % MAX_NUMNODES;
-
- if (!nr_cpus_node(n))
- continue;
-
- /* Skip already used nodes */
- if (test_bit(n, used_nodes))
- continue;
-
- /* Simple min distance search */
- val = node_distance(node, n);
-
- if (val < min_val) {
- min_val = val;
- best_node = n;
- }
- }
-
- set_bit(best_node, used_nodes);
- return best_node;
-}
-
-/**
- * sched_domain_node_span - get a cpumask for a node's sched_domain
- * @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
- *
- * Given a node, construct a good cpumask for its sched_domain to span. It
- * should be one that prevents unnecessary balancing, but also spreads tasks
- * out optimally.
- */
-static cpumask_t sched_domain_node_span(int node)
-{
- int i;
- cpumask_t span, nodemask;
- DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
-
- cpus_clear(span);
- bitmap_zero(used_nodes, MAX_NUMNODES);
-
- nodemask = node_to_cpumask(node);
- cpus_or(span, span, nodemask);
- set_bit(node, used_nodes);
-
- for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
- int next_node = find_next_best_node(node, used_nodes);
- nodemask = node_to_cpumask(next_node);
- cpus_or(span, span, nodemask);
- }
-
- return span;
-}
-#endif
-
-/*
- * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
- * can switch it on easily if needed.
- */
-#ifdef CONFIG_SCHED_SMT
-static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static struct sched_group sched_group_cpus[NR_CPUS];
-static int cpu_to_cpu_group(int cpu)
-{
- return cpu;
-}
-#endif
-
-static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
-static int cpu_to_phys_group(int cpu)
-{
-#ifdef CONFIG_SCHED_SMT
- return first_cpu(cpu_sibling_map[cpu]);
-#else
- return cpu;
-#endif
-}
-
-#ifdef CONFIG_NUMA
-/*
- * The init_sched_build_groups can't handle what we want to do with node
- * groups, so roll our own. Now each node has its own list of groups which
- * gets dynamically allocated.
- */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group *sched_group_nodes[MAX_NUMNODES];
-
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
-static struct sched_group sched_group_allnodes[MAX_NUMNODES];
-
-static int cpu_to_allnodes_group(int cpu)
-{
- return cpu_to_node(cpu);
-}
-#endif
-
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-void build_sched_domains(const cpumask_t *cpu_map)
-{
- int i;
-
- /*
- * Set up domains for cpus specified by the cpu_map.
- */
- for_each_cpu_mask(i, *cpu_map) {
- int group;
- struct sched_domain *sd = NULL, *p;
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
-
- cpus_and(nodemask, nodemask, *cpu_map);
-
-#ifdef CONFIG_NUMA
- if (num_online_cpus()
- > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
- sd = &per_cpu(allnodes_domains, i);
- *sd = SD_ALLNODES_INIT;
- sd->span = *cpu_map;
- group = cpu_to_allnodes_group(i);
- sd->groups = &sched_group_allnodes[group];
- p = sd;
- } else
- p = NULL;
-
- sd = &per_cpu(node_domains, i);
- *sd = SD_NODE_INIT;
- sd->span = sched_domain_node_span(cpu_to_node(i));
- sd->parent = p;
- cpus_and(sd->span, sd->span, *cpu_map);
-#endif
-
- p = sd;
- sd = &per_cpu(phys_domains, i);
- group = cpu_to_phys_group(i);
- *sd = SD_CPU_INIT;
- sd->span = nodemask;
- sd->parent = p;
- sd->groups = &sched_group_phys[group];
-
-#ifdef CONFIG_SCHED_SMT
- p = sd;
- sd = &per_cpu(cpu_domains, i);
- group = cpu_to_cpu_group(i);
- *sd = SD_SIBLING_INIT;
- sd->span = cpu_sibling_map[i];
- cpus_and(sd->span, sd->span, *cpu_map);
- sd->parent = p;
- sd->groups = &sched_group_cpus[group];
-#endif
- }
-
-#ifdef CONFIG_SCHED_SMT
- /* Set up CPU (sibling) groups */
- for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_sibling_map = cpu_sibling_map[i];
- cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
- if (i != first_cpu(this_sibling_map))
- continue;
-
- init_sched_build_groups(sched_group_cpus, this_sibling_map,
- &cpu_to_cpu_group);
- }
-#endif
-
- /* Set up physical groups */
- for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
-
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
- continue;
-
- init_sched_build_groups(sched_group_phys, nodemask,
- &cpu_to_phys_group);
- }
-
-#ifdef CONFIG_NUMA
- init_sched_build_groups(sched_group_allnodes, *cpu_map,
- &cpu_to_allnodes_group);
-
- for (i = 0; i < MAX_NUMNODES; i++) {
- /* Set up node groups */
- struct sched_group *sg, *prev;
- cpumask_t nodemask = node_to_cpumask(i);
- cpumask_t domainspan;
- cpumask_t covered = CPU_MASK_NONE;
- int j;
-
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
- continue;
-
- domainspan = sched_domain_node_span(i);
- cpus_and(domainspan, domainspan, *cpu_map);
-
- sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
- sched_group_nodes[i] = sg;
- for_each_cpu_mask(j, nodemask) {
- struct sched_domain *sd;
- sd = &per_cpu(node_domains, j);
- sd->groups = sg;
- if (sd->groups == NULL) {
- /* Turn off balancing if we have no groups */
- sd->flags = 0;
- }
- }
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", i);
- continue;
- }
- sg->cpu_power = 0;
- sg->cpumask = nodemask;
- cpus_or(covered, covered, nodemask);
- prev = sg;
-
- for (j = 0; j < MAX_NUMNODES; j++) {
- cpumask_t tmp, notcovered;
- int n = (i + j) % MAX_NUMNODES;
-
- cpus_complement(notcovered, covered);
- cpus_and(tmp, notcovered, *cpu_map);
- cpus_and(tmp, tmp, domainspan);
- if (cpus_empty(tmp))
- break;
-
- nodemask = node_to_cpumask(n);
- cpus_and(tmp, tmp, nodemask);
- if (cpus_empty(tmp))
- continue;
-
- sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", j);
- break;
- }
- sg->cpu_power = 0;
- sg->cpumask = tmp;
- cpus_or(covered, covered, tmp);
- prev->next = sg;
- prev = sg;
- }
- prev->next = sched_group_nodes[i];
- }
-#endif
-
- /* Calculate CPU power for physical packages and nodes */
- for_each_cpu_mask(i, *cpu_map) {
- int power;
- struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
- sd = &per_cpu(cpu_domains, i);
- power = SCHED_LOAD_SCALE;
- sd->groups->cpu_power = power;
-#endif
-
- sd = &per_cpu(phys_domains, i);
- power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
- (cpus_weight(sd->groups->cpumask)-1) / 10;
- sd->groups->cpu_power = power;
-
-#ifdef CONFIG_NUMA
- sd = &per_cpu(allnodes_domains, i);
- if (sd->groups) {
- power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
- (cpus_weight(sd->groups->cpumask)-1) / 10;
- sd->groups->cpu_power = power;
- }
-#endif
- }
-
-#ifdef CONFIG_NUMA
- for (i = 0; i < MAX_NUMNODES; i++) {
- struct sched_group *sg = sched_group_nodes[i];
- int j;
-
- if (sg == NULL)
- continue;
-next_sg:
- for_each_cpu_mask(j, sg->cpumask) {
- struct sched_domain *sd;
- int power;
-
- sd = &per_cpu(phys_domains, j);
- if (j != first_cpu(sd->groups->cpumask)) {
- /*
- * Only add "power" once for each
- * physical package.
- */
- continue;
- }
- power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
- (cpus_weight(sd->groups->cpumask)-1) / 10;
-
- sg->cpu_power += power;
- }
- sg = sg->next;
- if (sg != sched_group_nodes[i])
- goto next_sg;
- }
-#endif
-
- /* Attach the domains */
- for_each_cpu_mask(i, *cpu_map) {
- struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
- sd = &per_cpu(cpu_domains, i);
-#else
- sd = &per_cpu(phys_domains, i);
-#endif
- cpu_attach_domain(sd, i);
- }
-}
-/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
- */
-void arch_init_sched_domains(const cpumask_t *cpu_map)
-{
- cpumask_t cpu_default_map;
-
- /*
- * Setup mask for cpus without special case scheduling requirements.
- * For now this just excludes isolated cpus, but could be used to
- * exclude other special cases in the future.
- */
- cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
-
- build_sched_domains(&cpu_default_map);
-}
-
-void arch_destroy_sched_domains(const cpumask_t *cpu_map)
-{
-#ifdef CONFIG_NUMA
- int i;
- for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
- struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
- continue;
-
- if (sg == NULL)
- continue;
- sg = sg->next;
-next_sg:
- oldsg = sg;
- sg = sg->next;
- kfree(oldsg);
- if (oldsg != sched_group_nodes[i])
- goto next_sg;
- sched_group_nodes[i] = NULL;
- }
-#endif
-}
-
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 28f2aadc38d0..205d98028261 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -91,23 +91,8 @@ skip:
}
#ifdef CONFIG_SMP
-/*
- * This is updated when the user sets irq affinity via /proc
- */
-static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
-
static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
-/*
- * Arch specific routine for deferred write to iosapic rte to reprogram
- * intr destination.
- */
-void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
-{
- pending_irq_cpumask[irq] = mask_val;
-}
-
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
cpumask_t mask = CPU_MASK_NONE;
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
+ set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
-
-
-void move_irq(int irq)
-{
- /* note - we hold desc->lock */
- cpumask_t tmp;
- irq_desc_t *desc = irq_descp(irq);
- int redir = test_bit(irq, pending_irq_redir);
-
- if (unlikely(!desc->handler->set_affinity))
- return;
-
- if (!cpus_empty(pending_irq_cpumask[irq])) {
- cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
- if (unlikely(!cpus_empty(tmp))) {
- desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
- pending_irq_cpumask[irq]);
- }
- cpus_clear(pending_irq_cpumask[irq]);
- }
-}
-
-
#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
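Note: the arch-private pending_irq_cpumask[] bookkeeping and move_irq() removed above are replaced by the generic deferred-migration code enabled through the new GENERIC_PENDING_IRQ option; the arch now only records the mask via set_irq_info() so /proc/irq reporting stays accurate. A conceptual sketch of the deferral pattern the removed code implemented and the generic layer now provides (all names hypothetical):

#include <linux/cpumask.h>

static cpumask_t example_pending_mask;	/* written from /proc context */
static int example_pending;		/* consumed at the next interrupt */

static void example_record_affinity(cpumask_t mask)
{
	example_pending_mask = mask;	/* just remember the request */
	example_pending = 1;
}

/* called with the irq descriptor lock held, at a point where the
 * interrupt controller can safely be reprogrammed */
static void example_apply_affinity(void (*set_affinity)(cpumask_t))
{
	if (example_pending) {
		set_affinity(example_pending_mask);
		example_pending = 0;
	}
}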
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index b7fa3ccd2b0f..2323377e3695 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -49,6 +49,7 @@
/*
* void jprobe_break(void)
*/
+ .section .kprobes.text, "ax"
ENTRY(jprobe_break)
break.m 0x80300
END(jprobe_break)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 884f5cd27d8a..471086b808a4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = {
* is IP relative instruction and update the kprobe
* inst flag accordingly
*/
-static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
- unsigned long kprobe_inst, struct kprobe *p)
+static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
+ uint major_opcode,
+ unsigned long kprobe_inst,
+ struct kprobe *p)
{
p->ainsn.inst_flag = 0;
p->ainsn.target_br_reg = 0;
+ /* Check for Break instruction
+ * Bits 37:40 Major opcode to be zero
+ * Bits 27:32 X6 to be zero
+ * Bits 32:35 X3 to be zero
+ */
+ if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
+ /* is a break instruction */
+ p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
+ return;
+ }
+
if (bundle_encoding[template][slot] == B) {
switch (major_opcode) {
case INDIRECT_CALL_OPCODE:
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode
* Returns 0 if supported
* Returns -EINVAL if unsupported
*/
-static int unsupported_inst(uint template, uint slot, uint major_opcode,
- unsigned long kprobe_inst, struct kprobe *p)
+static int __kprobes unsupported_inst(uint template, uint slot,
+ uint major_opcode,
+ unsigned long kprobe_inst,
+ struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode,
* on which we are inserting kprobe is cmp instruction
* with ctype as unc.
*/
-static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
-unsigned long kprobe_inst)
+static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
+ uint major_opcode,
+ unsigned long kprobe_inst)
{
cmp_inst_t cmp_inst;
uint ctype_unc = 0;
@@ -201,8 +217,10 @@ out:
* In this function we override the bundle with
* the break instruction at the given slot.
*/
-static void prepare_break_inst(uint template, uint slot, uint major_opcode,
- unsigned long kprobe_inst, struct kprobe *p)
+static void __kprobes prepare_break_inst(uint template, uint slot,
+ uint major_opcode,
+ unsigned long kprobe_inst,
+ struct kprobe *p)
{
unsigned long break_inst = BREAK_INST;
bundle_t *bundle = &p->ainsn.insn.bundle;
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr)
&& addr < (unsigned long)__end_ivt_text);
}
-static int valid_kprobe_addr(int template, int slot, unsigned long addr)
+static int __kprobes valid_kprobe_addr(int template, int slot,
+ unsigned long addr)
{
if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
printk(KERN_WARNING "Attempting to insert unaligned kprobe "
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void)
* - cleanup by marking the instance as unused
* - long jump back to the original return address
*/
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
{
struct kretprobe_instance *ri;
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
}
}
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long) p->addr;
unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p)
return 0;
}
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p)
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
unsigned long addr = (unsigned long)p->addr;
unsigned long arm_addr = addr & ~0xFULL;
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p)
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p)
* to original stack address, handle the case where we need to fixup the
* relative IP address and/or fixup branch register.
*/
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
@@ -528,13 +548,16 @@ turn_ss_off:
ia64_psr(regs)->ss = 0;
}
-static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
unsigned long slot = (unsigned long)p->addr & 0xf;
- /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
- regs->cr_iip = bundle_addr & ~0xFULL;
+ /* single step inline if break instruction */
+ if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
+ regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
+ else
+ regs->cr_iip = bundle_addr & ~0xFULL;
if (slot > 2)
slot = 0;
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
ia64_psr(regs)->ss = 1;
}
-static int pre_kprobes_handler(struct die_args *args)
+static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
+{
+ unsigned int slot = ia64_psr(regs)->ri;
+ unsigned int template, major_opcode;
+ unsigned long kprobe_inst;
+ unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
+ bundle_t bundle;
+
+ memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
+ template = bundle.quad0.template;
+
+ /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot++;
+
+ /* Get Kprobe probe instruction at given slot*/
+ get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
+
+ /* For break instruction,
+ * Bits 37:40 Major opcode to be zero
+ * Bits 27:32 X6 to be zero
+ * Bits 32:35 X3 to be zero
+ */
+ if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
+ /* Not a break instruction */
+ return 0;
+ }
+
+ /* Is a break instruction */
+ return 1;
+}
+
+static int __kprobes pre_kprobes_handler(struct die_args *args)
{
struct kprobe *p;
int ret = 0;
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args)
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if ( (kprobe_status == KPROBE_HIT_SS) &&
+ (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
+ ia64_psr(regs)->ss = 0;
unlock_kprobes();
goto no_kprobe;
}
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args)
p = get_kprobe(addr);
if (!p) {
unlock_kprobes();
+ if (!is_ia64_break_inst(regs)) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+
+ }
+
+ /* Not one of our break, let kernel handle it */
goto no_kprobe;
}
@@ -616,7 +686,7 @@ no_kprobe:
return ret;
}
-static int post_kprobes_handler(struct pt_regs *regs)
+static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
if (!kprobe_running())
return 0;
@@ -641,7 +711,7 @@ out:
return 1;
}
-static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
if (!kprobe_running())
return 0;
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
return 0;
}
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
- void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
switch(val) {
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
return NOTIFY_DONE;
}
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
*regs = jprobe_saved_regs;
return 1;
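Note: both update_kprobe_inst_flag() and the new is_ia64_break_inst() above test the same encoding: an ia64 break instruction has a zero major opcode (bits 37:40) and zero X6/X3 fields, which the code checks as nine zero bits starting at bit 27, hence the 0x1FF mask. The predicate, restated in isolation for clarity (purely illustrative):

static inline int example_is_break(unsigned int major_opcode,
				   unsigned long inst)
{
	return major_opcode == 0 && ((inst >> 27) & 0x1FF) == 0;
}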
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 4440c8343fa4..f970359e7edf 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/module.h> /* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
+#include <linux/kprobes.h>
#include <asm/fpswa.h>
#include <asm/ia32.h>
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
}
void
-ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
+__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
siginfo_t siginfo;
int sig, code;
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
return rv;
}
-void
+void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long iim, unsigned long itir, long arg5, long arg6,
long arg7, struct pt_regs regs)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index a676e79e0681..30d8564e9603 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -48,6 +48,7 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.gnu.linkonce.t*)
}
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
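Note: KPROBES_TEXT here (and in the ppc64 and sparc64 linker scripts below) gathers every __kprobes function plus the assembly that switches into .kprobes.text into one contiguous region, which lets the kprobes core reject probe addresses that fall inside it. A hedged sketch of such a range check, assuming the conventional start/end symbols emitted by the linker macro:

extern char __kprobes_text_start[], __kprobes_text_end[];	/* assumed names */

static int example_in_kprobes_text(unsigned long addr)
{
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr <  (unsigned long)__kprobes_text_end;
}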
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 3e2cfa2c6d39..2a0d27f2f21b 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -20,6 +20,7 @@
*
* Note: "in0" and "in1" are preserved for debugging purposes.
*/
+ .section .kprobes.text,"ax"
GLOBAL_ENTRY(flush_icache_range)
.prologue
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index ff62551eb3a1..24614869e866 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
+#include <linux/kprobes.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -76,7 +77,7 @@ mapped_kernel_page_is_present (unsigned long address)
return pte_present(pte);
}
-void
+void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
int signal = SIGSEGV, code = SEGV_MAPERR;
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 4564ed0b5ff3..906622d9f933 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -431,7 +431,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
{
struct sysdata_el *element;
- element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL);
+ element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
if (!element) {
dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
return;
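Note: the SN2 call sites in this and the following files (and the ppc64 pSeries hunk later) each allocate a single zeroed object, so kcalloc(1, size, flags) becomes kzalloc(size, flags) and the redundant element-count multiply disappears. The conversion pattern, shown with a hypothetical structure:

#include <linux/slab.h>

struct example_el {
	int nid;
	void *cookie;
};

static struct example_el *example_alloc(void)
{
	/* was: kcalloc(1, sizeof(struct example_el), GFP_KERNEL) */
	return kzalloc(sizeof(struct example_el), GFP_KERNEL);
}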
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 254fe15c064b..b45db5133f55 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -191,7 +191,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
{
struct cx_dev *cx_dev;
- cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
+ cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL);
DBG("cx_dev= 0x%p\n", cx_dev);
if (cx_dev == NULL)
return -ENOMEM;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index ea09c12f0258..19bced34d5f1 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -148,7 +148,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
tioca_kern->ca_pcigart_entries =
tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_pagemap =
- kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
+ kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
if (!tioca_kern->ca_pcigart_pagemap) {
free_pages((unsigned long)tioca_kern->ca_gart,
get_order(tioca_kern->ca_gart_size));
@@ -392,7 +392,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
* allocate a map struct
*/
- ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
+ ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
if (!ca_dmamap)
goto map_return;
@@ -600,7 +600,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
+ tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
if (!tioca_common)
return NULL;
@@ -609,7 +609,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* init kernel-private area */
- tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
+ tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
if (!tioca_kern) {
kfree(tioca_common);
return NULL;
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 8a2b77bc5749..539c562cd54d 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -171,10 +171,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
@@ -221,7 +218,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* called as close as possible to 500 ms before the new second starts.
*/
write_seqlock(&xtime_lock);
- if ((time_status & STA_UNSYNC) == 0
+ if (ntp_synced()
&& xtime.tv_sec > last_rtc_update + 660
&& (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
&& (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
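Note: from here on, every do_settimeofday() that open-coded the four NTP state resets switches to ntp_clear(), and every `(time_status & STA_UNSYNC) == 0` test becomes ntp_synced(). Reconstructed from the lines they replace, the helpers amount to roughly the following; the real definitions live in the timekeeping headers of this tree:

#include <linux/timex.h>

static inline void example_ntp_clear(void)
{
	time_adjust   = 0;		/* stop active adjtime() */
	time_status  |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
}

static inline int example_ntp_synced(void)
{
	return (time_status & STA_UNSYNC) == 0;
}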
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index c6b2a410bf9a..eb63ca6ed94c 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -14,6 +14,7 @@
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
+#include <linux/module.h>
#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
#include <linux/smp_lock.h>
#include <asm/bvme6000hw.h>
@@ -171,7 +172,7 @@ static struct miscdevice rtc_dev = {
.fops = &rtc_fops
};
-int __init rtc_DP8570A_init(void)
+static int __init rtc_DP8570A_init(void)
{
if (!MACH_IS_BVME6000)
return -ENODEV;
@@ -179,4 +180,4 @@ int __init rtc_DP8570A_init(void)
printk(KERN_INFO "DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
return misc_register(&rtc_dev);
}
-
+module_init(rtc_DP8570A_init);
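Note: the BVME6000 (and, just below, MVME16x) RTC init routines become static and are registered through module_init() rather than being called explicitly, which is why linux/module.h is now included. The idiom in isolation (hypothetical driver):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_rtc_init(void)
{
	return 0;	/* detect the hardware and register the device here */
}
module_init(example_rtc_init);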
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index e47e19588525..4ec95e3cb874 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -166,10 +166,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 8a2425069088..7977eae50af2 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -161,7 +161,7 @@ static struct miscdevice rtc_dev=
.fops = &rtc_fops
};
-int __init rtc_MK48T08_init(void)
+static int __init rtc_MK48T08_init(void)
{
if (!MACH_IS_MVME16x)
return -ENODEV;
@@ -169,4 +169,4 @@ int __init rtc_MK48T08_init(void)
printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
return misc_register(&rtc_dev);
}
-
+module_init(rtc_MK48T08_init);
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 5c3ca671627c..b17c1ecba966 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -68,7 +68,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -178,10 +178,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 4613219dd73e..ece4564919d8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -546,20 +546,20 @@ struct msgbuf32 { s32 mtype; char mtext[1]; };
struct ipc_perm32
{
key_t key;
- compat_uid_t uid;
- compat_gid_t gid;
- compat_uid_t cuid;
- compat_gid_t cgid;
+ __compat_uid_t uid;
+ __compat_gid_t gid;
+ __compat_uid_t cuid;
+ __compat_gid_t cgid;
compat_mode_t mode;
unsigned short seq;
};
struct ipc64_perm32 {
key_t key;
- compat_uid_t uid;
- compat_gid_t gid;
- compat_uid_t cuid;
- compat_gid_t cgid;
+ __compat_uid_t uid;
+ __compat_gid_t gid;
+ __compat_uid_t cuid;
+ __compat_gid_t cgid;
compat_mode_t mode;
unsigned short seq;
unsigned short __pad1;
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index f3bf0e43b8bb..b46595462717 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -632,10 +632,7 @@ asmlinkage int irix_stime(int value)
write_seqlock_irq(&xtime_lock);
xtime.tv_sec = value;
xtime.tv_nsec = 0;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
return 0;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 648c82292ed6..0dd0df7a3b04 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -223,10 +223,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
@@ -442,7 +439,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* called as close as possible to 500 ms before the new second starts.
*/
write_seqlock(&xtime_lock);
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 8c1b96fffa76..cddf1cedf007 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -118,7 +118,7 @@ again:
* RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to when a second starts.
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 6cf7407344ba..7ff67f8e9f8c 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -188,10 +188,7 @@ do_settimeofday (struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index bf4ddca5e853..a3c5281a5d2d 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -169,7 +169,7 @@ void timer_interrupt(struct pt_regs * regs)
* We should have an rtc call that only sets the minutes and
* seconds like on Intel to avoid problems with non UTC clocks.
*/
- if ( ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 &&
+ if ( ppc_md.set_rtc_time && ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
jiffies - wall_jiffies == 1) {
@@ -271,10 +271,7 @@ int do_settimeofday(struct timespec *tv)
*/
last_rtc_update = new_sec - 658;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irqrestore(&xtime_lock, flags);
clock_was_set();
return 0;
diff --git a/arch/ppc/syslib/ocp.c b/arch/ppc/syslib/ocp.c
index e5fd2ae503ea..9ccce438bd7a 100644
--- a/arch/ppc/syslib/ocp.c
+++ b/arch/ppc/syslib/ocp.c
@@ -165,7 +165,7 @@ ocp_device_remove(struct device *dev)
}
static int
-ocp_device_suspend(struct device *dev, u32 state)
+ocp_device_suspend(struct device *dev, pm_message_t state)
{
struct ocp_device *ocp_dev = to_ocp_dev(dev);
struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver);
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index a3d519518fb8..7e80d49c589a 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -44,7 +44,7 @@ static struct kprobe *kprobe_prev;
static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
static struct pt_regs jprobe_saved_regs;
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
kprobe_opcode_t insn = *p->addr;
@@ -68,27 +68,27 @@ int arch_prepare_kprobe(struct kprobe *p)
return ret;
}
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
{
up(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
@@ -102,7 +102,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->msr |= MSR_SE;
/* single step inline if it is a trap variant */
- if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn))
+ if (is_trap(insn))
regs->nip = (unsigned long)p->addr;
else
regs->nip = (unsigned long)p->ainsn.insn;
@@ -122,7 +122,8 @@ static inline void restore_previous_kprobe(void)
kprobe_saved_msr = kprobe_saved_msr_prev;
}
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
{
struct kretprobe_instance *ri;
@@ -151,7 +152,9 @@ static inline int kprobe_handler(struct pt_regs *regs)
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ kprobe_opcode_t insn = *p->ainsn.insn;
+ if (kprobe_status == KPROBE_HIT_SS &&
+ is_trap(insn)) {
regs->msr &= ~MSR_SE;
regs->msr |= kprobe_saved_msr;
unlock_kprobes();
@@ -191,8 +194,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
* trap variant, it could belong to someone else
*/
kprobe_opcode_t cur_insn = *addr;
- if (IS_TW(cur_insn) || IS_TD(cur_insn) ||
- IS_TWI(cur_insn) || IS_TDI(cur_insn))
+ if (is_trap(cur_insn))
goto no_kprobe;
/*
* The breakpoint instruction was removed right
@@ -244,7 +246,7 @@ void kretprobe_trampoline_holder(void)
/*
* Called when the probe at kretprobe trampoline is hit
*/
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
@@ -308,7 +310,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
int ret;
unsigned int insn = *p->ainsn.insn;
@@ -373,8 +375,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
/*
* Wrapper routine for handling exceptions.
*/
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
- void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
@@ -402,11 +404,11 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
default:
break;
}
- preempt_enable();
+ preempt_enable_no_resched();
return ret;
}
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
@@ -419,16 +421,16 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
{
asm volatile("trap" ::: "memory");
}
-void jprobe_return_end(void)
+void __kprobes jprobe_return_end(void)
{
};
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
/*
* FIXME - we should ideally be validating that we got here 'cos
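Note: the repeated IS_TW/IS_TD/IS_TWI/IS_TDI tests in the ppc64 kprobes code collapse into a single is_trap() helper, used both when deciding to single-step a trap variant in place and when ignoring trap instructions that belong to someone else. Reconstructed from the conditions it replaces (illustrative macro name):

#define example_is_trap(instr)	(IS_TW(instr)  || IS_TD(instr) || \
				 IS_TWI(instr) || IS_TDI(instr))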
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 474df0a862bf..6d860c1d9fa0 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -183,7 +183,7 @@ PPC64_CACHES:
* flush all bytes from start through stop-1 inclusive
*/
-_GLOBAL(__flush_icache_range)
+_KPROBE(__flush_icache_range)
/*
* Flush the data cache to memory
@@ -223,7 +223,7 @@ _GLOBAL(__flush_icache_range)
bdnz 2b
isync
blr
-
+ .previous .text
/*
* Like above, but only do the D-cache.
*
@@ -957,7 +957,7 @@ _GLOBAL(sys_call_table32)
.llong .ppc_fork
.llong .sys_read
.llong .sys_write
- .llong .sys32_open /* 5 */
+ .llong .compat_sys_open /* 5 */
.llong .sys_close
.llong .sys32_waitpid
.llong .sys32_creat
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/ppc64/kernel/pSeries_reconfig.c
index dc2a69d412a2..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/ppc64/kernel/pSeries_reconfig.c
@@ -111,7 +111,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
struct device_node *np;
int err = -ENOMEM;
- np = kcalloc(1, sizeof(*np), GFP_KERNEL);
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
goto out_err;
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index 206619080e66..214914a95a50 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -867,37 +867,6 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
return sys_lseek(fd, (int)offset, origin);
}
-/*
- * This is just a version for 32-bit applications which does
- * not force O_LARGEFILE on.
- */
-asmlinkage long sys32_open(const char __user * filename, int flags, int mode)
-{
- char * tmp;
- int fd, error;
-
- tmp = getname(filename);
- fd = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- fd = get_unused_fd();
- if (fd >= 0) {
- struct file * f = filp_open(tmp, flags, mode);
- error = PTR_ERR(f);
- if (IS_ERR(f))
- goto out_error;
- fd_install(fd, f);
- }
-out:
- putname(tmp);
- }
- return fd;
-
-out_error:
- put_unused_fd(fd);
- fd = error;
- goto out;
-}
-
/* Note: it is necessary to treat bufsiz as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 91ef95ccda4f..9939c206afa4 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -128,7 +128,7 @@ static __inline__ void timer_check_rtc(void)
* We should have an rtc call that only sets the minutes and
* seconds like on Intel to avoid problems with non UTC clocks.
*/
- if ( (time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
jiffies - wall_jiffies == 1) {
@@ -435,10 +435,7 @@ int do_settimeofday(struct timespec *tv)
*/
last_rtc_update = new_sec - 658;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
do_gtod.varp->tb_to_xs );
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index a8d5e83ee89f..7467ae508e6e 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/kprobes.h>
#include <asm/kdebug.h>
#include <asm/pgtable.h>
@@ -220,7 +221,7 @@ void instruction_breakpoint_exception(struct pt_regs *regs)
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}
-void single_step_exception(struct pt_regs *regs)
+void __kprobes single_step_exception(struct pt_regs *regs)
{
regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
@@ -398,7 +399,7 @@ check_bug_trap(struct pt_regs *regs)
return 0;
}
-void program_check_exception(struct pt_regs *regs)
+void __kprobes program_check_exception(struct pt_regs *regs)
{
if (debugger_fault_handler(regs))
return;
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 4103cc13f8d6..0306510bc4ff 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -15,6 +15,7 @@ SECTIONS
*(.text .text.*)
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.fixup)
. = ALIGN(4096);
_etext = .;
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
index 20b0f37e8bf8..772f0714a5b7 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/ppc64/mm/fault.c
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
+#include <linux/kprobes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -84,8 +85,8 @@ static int store_updates_sp(struct pt_regs *regs)
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 8ca485676780..2fd75da15495 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -139,10 +139,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index df7a9b9d4cbf..02ca69918d7c 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -215,10 +215,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
@@ -234,7 +231,7 @@ static long last_rtc_update;
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
-static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
{
do_timer(regs);
#ifndef CONFIG_SMP
@@ -252,7 +249,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
* RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -285,7 +282,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* locally disabled. -arca
*/
write_seqlock(&xtime_lock);
- do_timer_interrupt(irq, NULL, regs);
+ do_timer_interrupt(irq, regs);
write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index 6c84da3efc73..f4a62a10053c 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -247,10 +247,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
@@ -303,7 +300,7 @@ static long last_rtc_update = 0;
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
-static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
{
unsigned long long current_ctc;
asm ("getcon cr62, %0" : "=r" (current_ctc));
@@ -328,7 +325,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
* RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -361,7 +358,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* locally disabled. -arca
*/
write_lock(&xtime_lock);
- do_timer_interrupt(irq, NULL, regs);
+ do_timer_interrupt(irq, regs);
write_unlock(&xtime_lock);
return IRQ_HANDLED;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 597d3ff6ad68..36a40697b8d6 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -840,10 +840,7 @@ static int pci_do_settimeofday(struct timespec *tv)
xtime.tv_sec = tv->tv_sec;
xtime.tv_nsec = tv->tv_nsec;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
return 0;
}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 3b759aefc170..bc015e980341 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -139,7 +139,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
/* Determine when to update the Mostek clock. */
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -554,10 +554,7 @@ static int sbus_do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
return 0;
}
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index bbf11f85dab1..0d66d07c8c6e 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -8,6 +8,7 @@
#include <linux/kprobes.h>
#include <asm/kdebug.h>
#include <asm/signal.h>
+#include <asm/cacheflush.h>
/* We do not have hardware single-stepping on sparc64.
* So we implement software single-stepping with breakpoint
@@ -37,31 +38,31 @@
* - Mark that we are no longer actively in a kprobe.
*/
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
return 0;
}
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
p->opcode = *p->addr;
}
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flushi(p->addr);
}
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flushi(p->addr);
}
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
@@ -111,7 +112,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
}
}
-static int kprobe_handler(struct pt_regs *regs)
+static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
void *addr = (void *) regs->tpc;
@@ -191,8 +192,9 @@ no_kprobe:
* The original INSN location was REAL_PC, it actually
* executed at PC and produced destination address NPC.
*/
-static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
- unsigned long pc, unsigned long npc)
+static unsigned long __kprobes relbranch_fixup(u32 insn, unsigned long real_pc,
+ unsigned long pc,
+ unsigned long npc)
{
/* Branch not taken, no mods necessary. */
if (npc == pc + 0x4UL)
@@ -217,7 +219,8 @@ static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
/* If INSN is an instruction which writes it's PC location
* into a destination register, fix that up.
*/
-static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
+static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
+ unsigned long real_pc)
{
unsigned long *slot = NULL;
@@ -257,7 +260,7 @@ static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
* This function prepares to return from the post-single-step
* breakpoint trap.
*/
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
u32 insn = p->ainsn.insn[0];
@@ -315,8 +318,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
/*
* Wrapper routine for handling exceptions.
*/
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
- void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
switch (val) {
@@ -344,7 +347,8 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
return NOTIFY_DONE;
}
-asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
+asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
+ struct pt_regs *regs)
{
BUG_ON(trap_level != 0x170 && trap_level != 0x171);
@@ -368,7 +372,7 @@ static struct pt_regs jprobe_saved_regs;
static struct pt_regs *jprobe_saved_regs_location;
static struct sparc_stackf jprobe_saved_stack;
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
@@ -390,7 +394,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
{
preempt_enable_no_resched();
__asm__ __volatile__(
@@ -403,7 +407,7 @@ extern void jprobe_return_trap_instruction(void);
extern void __show_regs(struct pt_regs * regs);
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
u32 *addr = (u32 *) regs->tpc;
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 1d3aa588df8a..7f6239ed2521 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1002,29 +1002,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
asmlinkage long sparc32_open(const char __user *filename,
int flags, int mode)
{
- char * tmp;
- int fd, error;
-
- tmp = getname(filename);
- fd = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- fd = get_unused_fd();
- if (fd >= 0) {
- struct file * f = filp_open(tmp, flags, mode);
- error = PTR_ERR(f);
- if (IS_ERR(f))
- goto out_error;
- fd_install(fd, f);
- }
-out:
- putname(tmp);
- }
- return fd;
-
-out_error:
- put_unused_fd(fd);
- fd = error;
- goto out;
+ return do_sys_open(filename, flags, mode);
}
extern unsigned long do_mremap(unsigned long addr,
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 362b9c26871b..3f08a32f51a1 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -449,7 +449,7 @@ static inline void timer_check_rtc(void)
static long last_rtc_update;
/* Determine when to update the Mostek clock. */
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
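
The open-coded STA_UNSYNC test becomes ntp_synced(), one of the new NTP helper functions used across architectures in this series. A sketch of the helper, assuming the inline added to <linux/timex.h>:

	/* Non-zero when the kernel clock is synchronized to an NTP server. */
	static inline int ntp_synced(void)
	{
		return !(time_status & STA_UNSYNC);
	}
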
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 950423da8a6a..f47d0be39378 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -17,6 +17,7 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.gnu.warning)
} =0
_etext = .;
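
KPROBES_TEXT, added to each architecture's vmlinux.lds.S in this series, brackets *(.kprobes.text) with start/end marker symbols; the generic kprobes code can then reject probes inside that window. A sketch of the consumer-side check, with names as recalled rather than quoted from the tree:

	extern char __kprobes_text_start[], __kprobes_text_end[];

	static int in_kprobes_functions(unsigned long addr)
	{
		if (addr >= (unsigned long)__kprobes_text_start &&
		    addr < (unsigned long)__kprobes_text_end)
			return -EINVAL;
		return 0;
	}
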
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 52e9375288a9..db1e3310e907 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/kprobes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -117,8 +118,9 @@ unsigned long __init prom_probe_memory (void)
return tally;
}
-static void unhandled_fault(unsigned long address, struct task_struct *tsk,
- struct pt_regs *regs)
+static void __kprobes unhandled_fault(unsigned long address,
+ struct task_struct *tsk,
+ struct pt_regs *regs)
{
if ((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
@@ -304,7 +306,7 @@ cannot_handle:
unhandled_fault (address, current, regs);
}
-asmlinkage void do_sparc64_fault(struct pt_regs *regs)
+asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3fbaf342a452..fdb1ebb308c9 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
+#include <linux/kprobes.h>
#include <asm/head.h>
#include <asm/system.h>
@@ -250,7 +251,7 @@ out:
put_cpu();
}
-void flush_icache_range(unsigned long start, unsigned long end)
+void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
/* Cheetah has coherent I-cache. */
if (tlb_type == spitfire) {
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 8dfa825eca51..b2ee9b53227f 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -119,6 +119,7 @@ __spitfire_flush_tlb_mm_slow:
#else
#error unsupported PAGE_SIZE
#endif
+ .section .kprobes.text, "ax"
.align 32
.globl __flush_icache_page
__flush_icache_page: /* %o0 = phys_page */
@@ -201,6 +202,7 @@ dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
nop
#endif /* DCACHE_ALIASING_POSSIBLE */
+ .previous .text
.align 32
__prefill_dtlb:
rdpr %pstate, %g7
diff --git a/arch/um/include/um_uaccess.h b/arch/um/include/um_uaccess.h
index 6e348cb6de24..84c0868cd561 100644
--- a/arch/um/include/um_uaccess.h
+++ b/arch/um/include/um_uaccess.h
@@ -20,13 +20,6 @@
#define access_ok(type, addr, size) \
CHOOSE_MODE_PROC(access_ok_tt, access_ok_skas, type, addr, size)
-/* this function will go away soon - use access_ok() instead */
-static inline int __deprecated verify_area(int type, const void __user *addr, unsigned long size)
-{
- return (CHOOSE_MODE_PROC(verify_area_tt, verify_area_skas, type, addr,
- size));
-}
-
static inline int copy_from_user(void *to, const void __user *from, int n)
{
return(CHOOSE_MODE_PROC(copy_from_user_tt, copy_from_user_skas, to,
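
verify_area() had been deprecated tree-wide in favour of access_ok(); UML drops its wrapper here. The conversion pattern for callers is mechanical, but note the inverted sense of the return value:

	/* Old style: verify_area() returned -EFAULT on failure. */
	if (verify_area(VERIFY_READ, buf, len))
		return -EFAULT;

	/* New style: access_ok() returns non-zero when the range is accessible. */
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;
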
diff --git a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
index f722a268238a..ea3fd8844ff0 100644
--- a/arch/v850/kernel/time.c
+++ b/arch/v850/kernel/time.c
@@ -66,7 +66,7 @@ static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs)
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
*/
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -169,10 +169,7 @@ int do_settimeofday(struct timespec *tv)
xtime.tv_sec = tv->tv_sec;
xtime.tv_nsec = tv->tv_nsec;
- time_adjust = 0; /* stop active adjtime () */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq (&xtime_lock);
clock_was_set();
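
The four lines that every architecture used to repeat in do_settimeofday() collapse into ntp_clear(). A sketch of the helper, assuming it simply centralizes the statements being removed here:

	void ntp_clear(void)
	{
		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
	}
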
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 75e52c57f19c..251ce7cf1a38 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -441,6 +441,11 @@ config ISA_DMA_API
bool
default y
+config GENERIC_PENDING_IRQ
+ bool
+ depends on GENERIC_HARDIRQS && SMP
+ default y
+
menu "Power management options"
source kernel/power/Kconfig
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S
index ff58b2832b75..12ea0b6c52e2 100644
--- a/arch/x86_64/boot/setup.S
+++ b/arch/x86_64/boot/setup.S
@@ -81,7 +81,7 @@ start:
# This is the setup header, and it must start at %cs:2 (old 0x9020:2)
.ascii "HdrS" # header signature
- .word 0x0203 # header version number (>= 0x0105)
+ .word 0x0204 # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
start_sys_seg: .word SYSSEG
diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c
index 18b5bac1c428..c44f5e2ec100 100644
--- a/arch/x86_64/boot/tools/build.c
+++ b/arch/x86_64/boot/tools/build.c
@@ -178,7 +178,9 @@ int main(int argc, char ** argv)
die("Output: seek failed");
buf[0] = (sys_size & 0xff);
buf[1] = ((sys_size >> 8) & 0xff);
- if (write(1, buf, 2) != 2)
+ buf[2] = ((sys_size >> 16) & 0xff);
+ buf[3] = ((sys_size >> 24) & 0xff);
+ if (write(1, buf, 4) != 4)
die("Write of image length failed");
return 0; /* Everything is OK */
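
Bumping the setup header to boot protocol 2.04 goes together with build.c writing the kernel size as a 32-bit little-endian value instead of 16 bits, so images larger than 65535 paragraphs (about 1 MiB) can be described. A hypothetical decoder for that field (the name get_syssize is illustrative only):

	/* Hypothetical helper: read the 4-byte little-endian syssize field. */
	static unsigned long get_syssize(const unsigned char *buf)
	{
		return (unsigned long)buf[0]
		     | ((unsigned long)buf[1] << 8)
		     | ((unsigned long)buf[2] << 16)
		     | ((unsigned long)buf[3] << 24);
	}
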
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index c45d6a05b984..f174083d5567 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -307,7 +307,7 @@ ia32_sys_call_table:
.quad stub32_fork
.quad sys_read
.quad sys_write
- .quad sys32_open /* 5 */
+ .quad compat_sys_open /* 5 */
.quad sys_close
.quad sys32_waitpid
.quad sys_creat
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index be996d1b691e..04d80406ce4f 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -969,32 +969,6 @@ long sys32_kill(int pid, int sig)
return sys_kill(pid, sig);
}
-asmlinkage long sys32_open(const char __user * filename, int flags, int mode)
-{
- char * tmp;
- int fd, error;
-
- /* don't force O_LARGEFILE */
- tmp = getname(filename);
- fd = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- fd = get_unused_fd();
- if (fd >= 0) {
- struct file *f = filp_open(tmp, flags, mode);
- error = PTR_ERR(f);
- if (IS_ERR(f)) {
- put_unused_fd(fd);
- fd = error;
- } else {
- fsnotify_open(f->f_dentry);
- fd_install(fd, f);
- }
- }
- putname(tmp);
- }
- return fd;
-}
-
extern asmlinkage long
sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
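
The ia32 syscall table now points open(2) at compat_sys_open and the private sys32_open copy goes away. The compat wrapper is expected to be a thin forwarder that, unlike the native sys_open, does not force O_LARGEFILE — roughly:

	asmlinkage long compat_sys_open(const char __user *filename,
					int flags, int mode)
	{
		return do_sys_open(filename, flags, mode);
	}
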
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index b548dea4e5b9..116ac5f53dce 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -85,7 +85,7 @@ int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
struct e820entry *ei = &e820.map[i];
if (type && ei->type != type)
continue;
- if (ei->addr >= end || ei->addr + ei->size < start)
+ if (ei->addr >= end || ei->addr + ei->size <= start)
continue;
return 1;
}
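
The e820_mapped() fix treats each e820 entry as a half-open range [addr, addr + size), so a region that ends exactly at `start` no longer counts as overlapping. The general test for half-open intervals:

	/* [a0, a1) and [b0, b1) overlap iff each starts before the other ends. */
	static int ranges_overlap(unsigned long a0, unsigned long a1,
				  unsigned long b0, unsigned long b1)
	{
		return a0 < b1 && b0 < a1;
	}

The patched condition skips an entry exactly when this test is false for [ei->addr, ei->addr + ei->size) against [start, end).
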
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 096d470e280f..be51dbe1f75e 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -784,8 +784,9 @@ ENTRY(execve)
ret
CFI_ENDPROC
-ENTRY(page_fault)
+KPROBE_ENTRY(page_fault)
errorentry do_page_fault
+ .previous .text
ENTRY(coprocessor_error)
zeroentry do_coprocessor_error
@@ -797,13 +798,14 @@ ENTRY(device_not_available)
zeroentry math_state_restore
/* runs on exception stack */
-ENTRY(debug)
+KPROBE_ENTRY(debug)
CFI_STARTPROC
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_debug
jmp paranoid_exit
CFI_ENDPROC
+ .previous .text
/* runs on exception stack */
ENTRY(nmi)
@@ -854,8 +856,9 @@ paranoid_schedule:
jmp paranoid_userspace
CFI_ENDPROC
-ENTRY(int3)
+KPROBE_ENTRY(int3)
zeroentry do_int3
+ .previous .text
ENTRY(overflow)
zeroentry do_overflow
@@ -892,8 +895,9 @@ ENTRY(stack_segment)
jmp paranoid_exit
CFI_ENDPROC
-ENTRY(general_protection)
+KPROBE_ENTRY(general_protection)
errorentry do_general_protection
+ .previous .text
ENTRY(alignment_check)
errorentry do_alignment_check
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index 30c843a5efdd..f062aa03bab7 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -25,7 +25,7 @@
#endif
/* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(x86_cpu_to_apicid);
u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
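
Several hot but rarely written globals in this series gain __read_mostly, which groups them into a separate data section so they do not share cache lines with frequently written data. A sketch of the annotation as defined for x86-64 at the time (the example array is hypothetical):

	/* Roughly: place the object in .data.read_mostly instead of .data. */
	#define __read_mostly	__attribute__((__section__(".data.read_mostly")))

	/* Written once during init, read constantly in fast paths. */
	static int lookup_table[16] __read_mostly;
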
diff --git a/arch/x86_64/kernel/genapic_cluster.c b/arch/x86_64/kernel/genapic_cluster.c
index 9703da7202e3..f6523dd1bc09 100644
--- a/arch/x86_64/kernel/genapic_cluster.c
+++ b/arch/x86_64/kernel/genapic_cluster.c
@@ -72,10 +72,14 @@ static void cluster_send_IPI_mask(cpumask_t mask, int vector)
static void cluster_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
+ int me = get_cpu(); /* Ensure we are not preempted when we clear */
+
+ cpu_clear(me, mask);
if (!cpus_empty(mask))
cluster_send_IPI_mask(mask, vector);
+
+ put_cpu();
}
static void cluster_send_IPI_all(int vector)
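
cluster_send_IPI_allbutself() previously read smp_processor_id() with preemption enabled; if the task migrated between reading the id and sending the IPI, the wrong CPU could be excluded from the mask. get_cpu()/put_cpu() bracket the section with preemption disabled:

	/* Pattern: pin the task to the current CPU for the critical section. */
	int cpu = get_cpu();		/* disables preemption, returns this CPU */

	do_something_excluding(cpu);	/* hypothetical work that needs a stable id */

	put_cpu();			/* re-enables preemption */
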
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index d206d7e49cf5..ba1a744e9bf0 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -70,7 +70,7 @@ static struct irq_pin_list {
short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
-int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
#ifdef CONFIG_PCI_MSI
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
@@ -78,6 +78,54 @@ int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
#define vector_to_irq(vector) (vector)
#endif
+#define __DO_ACTION(R, ACTION, FINAL) \
+ \
+{ \
+ int pin; \
+ struct irq_pin_list *entry = irq_2_pin + irq; \
+ \
+ for (;;) { \
+ unsigned int reg; \
+ pin = entry->pin; \
+ if (pin == -1) \
+ break; \
+ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
+ reg ACTION; \
+ io_apic_modify(entry->apic, reg); \
+ if (!entry->next) \
+ break; \
+ entry = irq_2_pin + entry->next; \
+ } \
+ FINAL; \
+}
+
+#ifdef CONFIG_SMP
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ unsigned long flags;
+ unsigned int dest;
+ cpumask_t tmp;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ tmp = TARGET_CPUS;
+
+ cpus_and(mask, tmp, CPU_MASK_ALL);
+
+ dest = cpu_mask_to_apicid(mask);
+
+ /*
+ * Only the high 8 bits are valid.
+ */
+ dest = SET_APIC_LOGICAL_ID(dest);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __DO_ACTION(1, = dest, )
+ set_irq_info(irq, mask);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+#endif
+
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
@@ -101,26 +149,6 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
entry->pin = pin;
}
-#define __DO_ACTION(R, ACTION, FINAL) \
- \
-{ \
- int pin; \
- struct irq_pin_list *entry = irq_2_pin + irq; \
- \
- for (;;) { \
- unsigned int reg; \
- pin = entry->pin; \
- if (pin == -1) \
- break; \
- reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
- reg ACTION; \
- io_apic_modify(entry->apic, reg); \
- if (!entry->next) \
- break; \
- entry = irq_2_pin + entry->next; \
- } \
- FINAL; \
-}
#define DO_ACTION(name,R,ACTION, FINAL) \
\
@@ -655,7 +683,7 @@ static inline int IO_APIC_irq_trigger(int irq)
}
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq)
{
@@ -767,6 +795,7 @@ static void __init setup_IO_APIC_irqs(void)
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+ set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
}
@@ -1314,6 +1343,7 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
*/
static void ack_edge_ioapic_irq(unsigned int irq)
{
+ move_irq(irq);
if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
== (IRQ_PENDING | IRQ_DISABLED))
mask_IO_APIC_irq(irq);
@@ -1343,26 +1373,10 @@ static unsigned int startup_level_ioapic_irq (unsigned int irq)
static void end_level_ioapic_irq (unsigned int irq)
{
+ move_irq(irq);
ack_APIC_irq();
}
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
-{
- unsigned long flags;
- unsigned int dest;
-
- dest = cpu_mask_to_apicid(mask);
-
- /*
- * Only the high 8 bits are valid.
- */
- dest = SET_APIC_LOGICAL_ID(dest);
-
- spin_lock_irqsave(&ioapic_lock, flags);
- __DO_ACTION(1, = dest, )
- spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
#ifdef CONFIG_PCI_MSI
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
@@ -1375,6 +1389,7 @@ static void ack_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
+ move_native_irq(vector);
ack_edge_ioapic_irq(irq);
}
@@ -1389,6 +1404,7 @@ static void end_level_ioapic_vector (unsigned int vector)
{
int irq = vector_to_irq(vector);
+ move_native_irq(vector);
end_level_ioapic_irq(irq);
}
@@ -1406,14 +1422,17 @@ static void unmask_IO_APIC_vector (unsigned int vector)
unmask_IO_APIC_irq(irq);
}
+#ifdef CONFIG_SMP
static void set_ioapic_affinity_vector (unsigned int vector,
cpumask_t cpu_mask)
{
int irq = vector_to_irq(vector);
+ set_native_irq_info(vector, cpu_mask);
set_ioapic_affinity_irq(irq, cpu_mask);
}
-#endif
+#endif // CONFIG_SMP
+#endif // CONFIG_PCI_MSI
/*
* Level and edge triggered IO-APIC interrupts need different handling,
@@ -1424,7 +1443,7 @@ static void set_ioapic_affinity_vector (unsigned int vector,
* races.
*/
-static struct hw_interrupt_type ioapic_edge_type = {
+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
.typename = "IO-APIC-edge",
.startup = startup_edge_ioapic,
.shutdown = shutdown_edge_ioapic,
@@ -1432,10 +1451,12 @@ static struct hw_interrupt_type ioapic_edge_type = {
.disable = disable_edge_ioapic,
.ack = ack_edge_ioapic,
.end = end_edge_ioapic,
+#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
+#endif
};
-static struct hw_interrupt_type ioapic_level_type = {
+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
.typename = "IO-APIC-level",
.startup = startup_level_ioapic,
.shutdown = shutdown_level_ioapic,
@@ -1443,7 +1464,9 @@ static struct hw_interrupt_type ioapic_level_type = {
.disable = disable_level_ioapic,
.ack = mask_and_ack_level_ioapic,
.end = end_level_ioapic,
+#ifdef CONFIG_SMP
.set_affinity = set_ioapic_affinity,
+#endif
};
static inline void init_IO_APIC_traps(void)
@@ -1506,7 +1529,7 @@ static void ack_lapic_irq (unsigned int irq)
static void end_lapic_irq (unsigned int i) { /* nothing */ }
-static struct hw_interrupt_type lapic_irq_type = {
+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
.typename = "local-APIC-edge",
.startup = NULL, /* startup_irq() not used for IRQ0 */
.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
@@ -1918,6 +1941,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
return 0;
@@ -1931,6 +1955,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
* we need to reprogram the ioredtbls to cater for the cpus which have come online
* so mask in all cases should simply be TARGET_CPUS
*/
+#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
@@ -1949,3 +1974,4 @@ void __init setup_ioapic_dest(void)
}
}
+#endif
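
The io_apic.c rework hooks into the new GENERIC_PENDING_IRQ machinery selected in the Kconfig change above: a set_affinity request from process context is only recorded, and move_irq()/move_native_irq() apply it from the ack/end path of the next interrupt, where the redirection table can be rewritten without racing an in-flight interrupt. A rough, simplified sketch of the idea (names and structure are illustrative, not the kernel's exact implementation):

	/* Illustrative only: record the request now, apply it at ack time. */
	static cpumask_t pending_mask[NR_IRQS];
	static int move_pending[NR_IRQS];

	static void request_affinity(unsigned int irq, cpumask_t mask)
	{
		pending_mask[irq] = mask;
		move_pending[irq] = 1;		/* picked up on the next interrupt */
	}

	static void apply_pending_move(unsigned int irq)
	{
		if (!move_pending[irq])
			return;
		move_pending[irq] = 0;
		set_ioapic_affinity_irq(irq, pending_mask[irq]);	/* safe here */
	}
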
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 5c6dc7051482..df08c43276a0 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -74,7 +74,7 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
return 0;
}
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on x86_64. */
up(&kprobe_mutex);
@@ -189,7 +189,7 @@ static inline s32 *is_riprel(u8 *insn)
return NULL;
}
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
{
s32 *ripdisp;
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -215,21 +215,21 @@ void arch_copy_kprobe(struct kprobe *p)
p->opcode = *p->addr;
}
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
{
up(&kprobe_mutex);
free_insn_slot(p->ainsn.insn);
@@ -261,7 +261,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
kprobe_saved_rflags &= ~IF_MASK;
}
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
regs->eflags |= TF_MASK;
regs->eflags &= ~IF_MASK;
@@ -272,7 +272,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->rip = (unsigned long)p->ainsn.insn;
}
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
{
unsigned long *sara = (unsigned long *)regs->rsp;
struct kretprobe_instance *ri;
@@ -295,7 +296,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
*/
-int kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
@@ -310,7 +311,8 @@ int kprobe_handler(struct pt_regs *regs)
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if (kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
regs->eflags |= kprobe_saved_rflags;
unlock_kprobes();
@@ -360,7 +362,10 @@ int kprobe_handler(struct pt_regs *regs)
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
*/
+ regs->rip = (unsigned long)addr;
ret = 1;
}
/* Not one of ours: let kernel handle it */
@@ -399,7 +404,7 @@ no_kprobe:
/*
* Called when we hit the probe point at kretprobe_trampoline
*/
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
@@ -478,7 +483,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
*/
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
unsigned long *tos = (unsigned long *)regs->rsp;
unsigned long next_rip = 0;
@@ -536,7 +541,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
* remain disabled throughout this function. And we hold the kprobe lock.
*/
-int post_kprobe_handler(struct pt_regs *regs)
+int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
if (!kprobe_running())
return 0;
@@ -571,7 +576,7 @@ out:
}
/* Interrupts disabled, kprobe_lock held. */
-int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
if (current_kprobe->fault_handler
&& current_kprobe->fault_handler(current_kprobe, regs, trapnr))
@@ -590,8 +595,8 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
/*
* Wrapper routine for handling exceptions.
*/
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
- void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
switch (val) {
@@ -619,7 +624,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
return NOTIFY_DONE;
}
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
@@ -640,7 +645,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
{
preempt_enable_no_resched();
asm volatile (" xchg %%rbx,%%rsp \n"
@@ -651,7 +656,7 @@ void jprobe_return(void)
(jprobe_saved_rsp):"memory");
}
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
u8 *addr = (u8 *) (regs->rip - 1);
unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
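
Two behavioural fixes sit among the x86-64 __kprobes annotations: kprobe_handler() only treats a re-entered breakpoint as its own single-step case when the copied instruction slot really contains int3, and when the breakpoint turns out to have been removed underneath us it backs rip up by one byte so the original instruction is re-executed. The arithmetic relies on int3 being a single-byte opcode; a fragment showing the idea from kprobe_handler():

	/* int3 (0xcc) is one byte, and the trap leaves rip pointing just past it,
	 * so the probed address is rip - 1. */
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));

	/* If no probe is registered there any more, step back and rerun the
	 * original instruction rather than skipping a byte. */
	regs->rip = (unsigned long)addr;
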
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 64a8e05d5811..caf164959e19 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -463,6 +463,8 @@ void touch_nmi_watchdog (void)
*/
for (i = 0; i < NR_CPUS; i++)
per_cpu(nmi_touch, i) = 1;
+
+ touch_softlockup_watchdog();
}
void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
@@ -522,14 +524,14 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
nmi_enter();
add_pda(__nmi_count,1);
- if (!nmi_callback(regs, cpu))
+ if (!rcu_dereference(nmi_callback)(regs, cpu))
default_do_nmi(regs);
nmi_exit();
}
void set_nmi_callback(nmi_callback_t callback)
{
- nmi_callback = callback;
+ rcu_assign_pointer(nmi_callback, callback);
}
void unset_nmi_callback(void)
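
The NMI callback pointer is now published with rcu_assign_pointer() and read with rcu_dereference(), so a callback set up on one CPU is fully initialized before another CPU can call through the pointer. The generic publish/consume pattern, with hypothetical names (cfg, live_cfg, install_cfg, read_cfg):

	#include <linux/rcupdate.h>

	struct cfg { int a, b; };		/* hypothetical payload */
	static struct cfg *live_cfg;

	/* Writer: initialise fully, then publish; the helper supplies the
	 * write barrier so readers never see a half-built object. */
	void install_cfg(struct cfg *new_cfg)
	{
		new_cfg->a = 1;
		new_cfg->b = 2;
		rcu_assign_pointer(live_cfg, new_cfg);
	}

	/* Reader: rcu_dereference() orders the pointer load before its uses. */
	int read_cfg(void)
	{
		struct cfg *c;
		int val = 0;

		rcu_read_lock();
		c = rcu_dereference(live_cfg);
		if (c)
			val = c->a + c->b;
		rcu_read_unlock();
		return val;
	}
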
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7577f9d7a75d..8661f82ac70b 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -310,6 +310,7 @@ void __show_regs(struct pt_regs * regs)
void show_regs(struct pt_regs *regs)
{
+ printk("CPU %d:", smp_processor_id());
__show_regs(regs);
show_trace(&regs->rsp);
}
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 116a491e2961..b356f8e6adfe 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -65,7 +65,7 @@
* Machine setup..
*/
-struct cpuinfo_x86 boot_cpu_data;
+struct cpuinfo_x86 boot_cpu_data __read_mostly;
unsigned long mmu_cr4_features;
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 34082c1cc41e..e3ffcacc8c90 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -36,7 +36,7 @@ struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
-unsigned long __supported_pte_mask = ~0UL;
+unsigned long __supported_pte_mask __read_mostly = ~0UL;
static int do_not_nx __initdata = 0;
/* noexec=on|off
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index fa25e39fe54d..90aeccd15190 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -62,13 +62,13 @@
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
-u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
EXPORT_SYMBOL(cpu_core_id);
/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map;
+cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
@@ -88,8 +88,8 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
/* Set when the idlers are all forked */
int smp_threads_ready;
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/*
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 66bf6ddeb0c3..7b6abe058253 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -176,10 +176,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
@@ -471,7 +468,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* off) isn't likely to go away much sooner anyway.
*/
- if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
+ if (ntp_synced() && xtime.tv_sec > rtc_update &&
abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
set_rtc_mmss(xtime.tv_sec);
rtc_update = xtime.tv_sec + 660;
@@ -1041,6 +1038,7 @@ static int timer_resume(struct sys_device *dev)
write_sequnlock_irqrestore(&xtime_lock,flags);
jiffies += sleep_length;
wall_jiffies += sleep_length;
+ touch_softlockup_watchdog();
return 0;
}
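
timer_resume() (like touch_nmi_watchdog() earlier in this series) now calls touch_softlockup_watchdog(): after a suspend, jiffies is advanced by the whole sleep length, which would otherwise look to the new softlockup detector like a CPU that has not scheduled for minutes. A hypothetical sketch of the timestamp idea behind such a detector (not the kernel's exact code):

	#include <linux/jiffies.h>

	/* Illustrative only: each CPU keeps a "last seen alive" timestamp. */
	static unsigned long alive_timestamp;

	void touch_watchdog_example(void)
	{
		alive_timestamp = jiffies;	/* forgive any apparent stall so far */
	}

	void check_watchdog_example(void)
	{
		/* Warn if nothing refreshed the timestamp for ten seconds. */
		if (time_after(jiffies, alive_timestamp + 10 * HZ))
			printk(KERN_ERR "example: soft lockup suspected\n");
	}
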
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6ead433a3885..f238d6078a5a 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -419,8 +420,9 @@ void die_nmi(char *str, struct pt_regs *regs)
do_exit(SIGSEGV);
}
-static void do_trap(int trapnr, int signr, char *str,
- struct pt_regs * regs, long error_code, siginfo_t *info)
+static void __kprobes do_trap(int trapnr, int signr, char *str,
+ struct pt_regs * regs, long error_code,
+ siginfo_t *info)
{
conditional_sti(regs);
@@ -504,7 +506,8 @@ DO_ERROR(18, SIGSEGV, "reserved", reserved)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
-asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
+asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
+ long error_code)
{
conditional_sti(regs);
@@ -622,7 +625,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
io_check_error(reason, regs);
}
-asmlinkage void do_int3(struct pt_regs * regs, long error_code)
+asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
return;
@@ -653,7 +656,8 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
}
/* runs on IST stack. */
-asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
+asmlinkage void __kprobes do_debug(struct pt_regs * regs,
+ unsigned long error_code)
{
unsigned long condition;
struct task_struct *tsk = current;
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 2a94f9b60b2d..d4abb07af52d 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x9090
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index ca914c3bd49c..816732d8858c 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -23,6 +23,7 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/module.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -294,7 +295,8 @@ int exception_trace = 1;
* bit 2 == 0 means kernel, 1 means user-mode
* bit 3 == 1 means fault was an instruction fetch
*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long error_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 6a156f5692ae..04f7a33e144c 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -22,14 +22,14 @@
#define Dprintk(x...)
#endif
-struct pglist_data *node_data[MAX_NUMNODES];
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];
int memnode_shift;
u8 memnodemap[NODEMAPSIZE];
-unsigned char cpu_to_node[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
-cpumask_t node_to_cpumask[MAX_NUMNODES];
+unsigned char cpu_to_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
+cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
int numa_off __initdata;
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index e07287db5a40..1ac7d5ce7456 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -122,10 +122,7 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
return 0;
}
@@ -184,7 +181,7 @@ again:
next += CCOUNT_PER_JIFFY;
do_timer (regs); /* Linux handler in kernel/timer.c */
- if ((time_status & STA_UNSYNC) == 0 &&
+ if (ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
jiffies - wall_jiffies == 1) {