267 files changed, 3093 insertions, 1358 deletions
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index 0b1a3f97f285..a0d479d1e1dd 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl @@ -1961,6 +1961,12 @@ machines due to caching. </sect1> </chapter> + <chapter id="apiref"> + <title>Mutex API reference</title> +!Iinclude/linux/mutex.h +!Ekernel/mutex.c + </chapter> + <chapter id="references"> <title>Further reading</title> diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt new file mode 100644 index 000000000000..e578feed6d81 --- /dev/null +++ b/Documentation/block/cfq-iosched.txt @@ -0,0 +1,45 @@ +CFQ ioscheduler tunables +======================== + +slice_idle +---------- +This specifies how long CFQ should idle for the next request on certain cfq queues +(for sequential workloads) and service trees (for random workloads) before +the queue is expired and CFQ selects the next queue to dispatch from. + +By default slice_idle is a non-zero value. That means by default we idle on +queues/service trees. This can be very helpful on highly seeky media like +single-spindle SATA/SAS disks where we can cut down on the overall number of +seeks and see improved throughput. + +Setting slice_idle to 0 will remove all the idling on queues/service tree +level and one should see an overall improved throughput on faster storage +devices like multiple SATA/SAS disks in a hardware RAID configuration. The down +side is that the isolation provided from WRITES also goes down and the notion of +IO priority becomes weaker. + +So depending on storage and workload, it might be useful to set slice_idle=0. +In general, for SATA/SAS disks and software RAID of SATA/SAS disks, +keeping slice_idle enabled should be useful. For any configuration where +there are multiple spindles behind a single LUN (host-based hardware RAID +controller or storage arrays), setting slice_idle=0 might end up in better +throughput and acceptable latencies. + +CFQ IOPS Mode for group scheduling +=================================== +The basic CFQ design is to provide priority-based time slices. A higher priority +process gets a bigger time slice and a lower priority process gets a smaller time +slice. Measuring time becomes harder if storage is fast and supports NCQ, and +it would be better to dispatch multiple requests from multiple cfq queues in +the request queue at a time. In such a scenario, it is not possible to measure the time +consumed by a single queue accurately. + +What is possible, though, is to measure the number of requests dispatched from a +single queue and also allow dispatch from multiple cfq queues at the same time. +This effectively becomes fairness in terms of IOPS (IO operations per +second). + +If one sets slice_idle=0 and the storage supports NCQ, CFQ internally switches +to IOPS mode and starts providing fairness in terms of number of requests +dispatched. Note that this mode switching takes effect only for group +scheduling. For non-cgroup users nothing should change.
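Switching a disk into IOPS mode amounts to a single sysfs write. Below is a minimal userspace sketch in C, assuming a hypothetical /dev/sdb whose active scheduler is CFQ; the usual "echo 0 > /sys/block/sdb/queue/iosched/slice_idle" from a shell is equivalent:

	#include <stdio.h>

	int main(void)
	{
		/* slice_idle=0 flips CFQ into IOPS mode on NCQ-capable storage */
		FILE *f = fopen("/sys/block/sdb/queue/iosched/slice_idle", "w");

		if (!f) {
			perror("slice_idle");
			return 1;
		}
		fputs("0\n", f);
		return fclose(f) == 0 ? 0 : 1;
	}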
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt index 48e0b21b0059..6919d62591d9 100644 --- a/Documentation/cgroups/blkio-controller.txt +++ b/Documentation/cgroups/blkio-controller.txt @@ -217,6 +217,7 @@ Details of cgroup files CFQ sysfs tunable ================= /sys/block/<disk>/queue/iosched/group_isolation +----------------------------------------------- If group_isolation=1, it provides stronger isolation between groups at the expense of throughput. By default group_isolation is 0. In general that @@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient and one wants stronger isolation between groups, then set group_isolation=1 but this will come at the cost of reduced throughput. +/sys/block/<disk>/queue/iosched/slice_idle +------------------------------------------ +On faster hardware CFQ can be slow, especially with a sequential workload. +This happens because CFQ idles on a single queue, and a single queue might not +drive deep enough request queue depths to keep the storage busy. In such scenarios +one can try setting slice_idle=0, which switches CFQ to IOPS +(IO operations per second) mode on NCQ-supporting hardware. + +That means CFQ will not idle between cfq queues of a cfq group and hence will be +able to drive a higher queue depth and achieve better throughput. That also +means that cfq provides fairness among groups in terms of IOPS and not in +terms of disk time. + +/sys/block/<disk>/queue/iosched/group_idle +------------------------------------------ +If one disables idling on individual cfq queues and cfq service trees by +setting slice_idle=0, group_idle kicks in. That means CFQ will still idle +on the group in an attempt to provide fairness among groups. + +By default group_idle is the same as slice_idle and does not do anything if +slice_idle is enabled. + +One can experience an overall throughput drop after creating multiple +groups and putting in them applications which are not driving enough +IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle +on individual groups and throughput should improve. + What works ========== - Currently only sync IO queues are supported. All the buffered writes are diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt index d96a6dba5748..9633da01ff46 100644 --- a/Documentation/gpio.txt +++ b/Documentation/gpio.txt @@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders. If you want to initialize a structure with an invalid GPIO number, use some negative number (perhaps "-EINVAL"); that will never be valid. To -test if a number could reference a GPIO, you may use this predicate: +test if such a number from such a structure could reference a GPIO, you +may use this predicate: int gpio_is_valid(int number); A number that's not valid will be rejected by calls which may request or free GPIOs (see below). Other numbers may also be rejected; for -example, a number might be valid but unused on a given board. - -Whether a platform supports multiple GPIO controllers is currently a -platform-specific implementation issue. +example, a number might be valid but temporarily unused on a given board. +Whether a platform supports multiple GPIO controllers is a platform-specific +implementation issue, as are whether that support can leave "holes" in the space +of GPIO numbers, and whether new controllers can be added at runtime. Such issues +can affect things including whether adjacent GPIO numbers are both valid.
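To make the predicate's intended use concrete, here is a kernel-style sketch of a driver validating a GPIO number taken from board data before claiming it; the function name and request label are illustrative, not from the patch:

	#include <linux/errno.h>
	#include <linux/gpio.h>

	/* board code may have initialized the number to -EINVAL ("not wired") */
	static int example_claim_gpio(int gpio)
	{
		if (!gpio_is_valid(gpio))
			return -ENODEV;		/* no such line on this board */

		/* a valid number may still be rejected here, e.g. if unused */
		return gpio_request(gpio, "example-signal");
	}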
Using GPIOs ----------- @@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep(). -They may also want to provide a custom value for ARCH_NR_GPIOS. -ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled +It may also provide a custom value for ARCH_NR_GPIOS, so that it better +reflects the number of GPIOs in actual use on that platform, without +wasting static table space. (It should count both built-in/SoC GPIOs and +also ones on GPIO expanders.) + +ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled into the kernel on that architecture. -ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user +ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user can enable it and build it into the kernel optionally. If neither of these options are selected, the platform does not support diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt index c91ccc0720fa..38c10fd7f411 100644 --- a/Documentation/mutex-design.txt +++ b/Documentation/mutex-design.txt @@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler mutex semantics are sufficient for your code, then there are a couple of advantages of mutexes: - - 'struct mutex' is smaller on most architectures: .e.g on x86, + - 'struct mutex' is smaller on most architectures: E.g. on x86, 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes. A smaller structure size means less RAM footprint, and better CPU-cache utilization. @@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined: void mutex_lock_nested(struct mutex *lock, unsigned int subclass); int mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass); + int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
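atomic_dec_and_mutex_lock() decrements the counter and, only when it reaches zero, returns 1 with the mutex held; otherwise it returns 0 without taking the lock. A sketch of the refcounted-teardown pattern this helper serves, with illustrative names (struct foo, foo_list_lock, foo_put) not taken from the patch:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct foo {
		atomic_t refcount;
		struct list_head node;
	};

	static DEFINE_MUTEX(foo_list_lock);

	static void foo_put(struct foo *f)
	{
		/* returns 1 with foo_list_lock held iff refcount hit zero */
		if (atomic_dec_and_mutex_lock(&f->refcount, &foo_list_lock)) {
			list_del(&f->node);
			mutex_unlock(&foo_list_lock);
			kfree(f);
		}
	}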
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index ce46fa1e643e..37c6aad5e590 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -296,6 +296,7 @@ Conexant 5051 Conexant 5066 ============= laptop Basic Laptop config (default) + hp-laptop HP laptops, e.g. G60 dell-laptop Dell laptops dell-vostro Dell Vostro olpc-xo-1_5 OLPC XO 1.5 diff --git a/MAINTAINERS b/MAINTAINERS index 087912aa09bd..e7c528ff1013 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1445,6 +1445,16 @@ S: Maintained F: Documentation/video4linux/cafe_ccic F: drivers/media/video/cafe_ccic* +CAIF NETWORK LAYER +M: Sjur Braendeland <sjur.brandeland@stericsson.com> +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/caif/ +F: drivers/net/caif/ +F: include/linux/caif/ +F: include/net/caif/ +F: net/caif/ + CALGARY x86-64 IOMMU M: Muli Ben-Yehuda <muli@il.ibm.com> M: "Jon D. Mason" <jdmason@kudzu.us> @@ -2787,11 +2797,6 @@ S: Maintained F: arch/x86/kernel/hpet.c F: arch/x86/include/asm/hpet.h -HPET: ACPI -M: Bob Picco <bob.picco@hp.com> -S: Maintained -F: drivers/char/hpet.c - HPFS FILESYSTEM M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi @@ -3404,7 +3409,7 @@ F: drivers/s390/kvm/ KEXEC M: Eric Biederman <ebiederm@xmission.com> -W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ +W: http://kernel.org/pub/linux/utils/kernel/kexec/ L: kexec@lists.infradead.org S: Maintained F: include/linux/kexec.h @@ -4810,6 +4815,7 @@ RCUTORTURE MODULE M: Josh Triplett <josh@freedesktop.org> M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> S: Supported +T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git F: Documentation/RCU/torture.txt F: kernel/rcutorture.c @@ -4834,6 +4840,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com> M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> W: http://www.rdrop.com/users/paulmck/rclock/ S: Supported +T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git F: Documentation/RCU/ F: include/linux/rcu* F: include/linux/srcu* @@ -4841,12 +4848,10 @@ F: kernel/rcu* F: kernel/srcu* X: kernel/rcutorture.c -REAL TIME CLOCK DRIVER +REAL TIME CLOCK DRIVER (LEGACY) M: Paul Gortmaker <p_gortmaker@yahoo.com> S: Maintained -F: Documentation/rtc.txt -F: drivers/rtc/ -F: include/linux/rtc.h +F: drivers/char/rtc.c REAL TIME CLOCK (RTC) SUBSYSTEM M: Alessandro Zummo <a.zummo@towertech.it> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 16bc8eb4901c..553b7cf17bfb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1576,97 +1576,6 @@ config AUTO_ZRELADDR 0xf8000000. This assumes the zImage being placed in the first 128MB from start of memory. -config ZRELADDR - hex "Physical address of the decompressed kernel image" - depends on !AUTO_ZRELADDR - default 0x00008000 if ARCH_BCMRING ||\ - ARCH_CNS3XXX ||\ - ARCH_DOVE ||\ - ARCH_EBSA110 ||\ - ARCH_FOOTBRIDGE ||\ - ARCH_INTEGRATOR ||\ - ARCH_IOP13XX ||\ - ARCH_IOP33X ||\ - ARCH_IXP2000 ||\ - ARCH_IXP23XX ||\ - ARCH_IXP4XX ||\ - ARCH_KIRKWOOD ||\ - ARCH_KS8695 ||\ - ARCH_LOKI ||\ - ARCH_MMP ||\ - ARCH_MV78XX0 ||\ - ARCH_NOMADIK ||\ - ARCH_NUC93X ||\ - ARCH_NS9XXX ||\ - ARCH_ORION5X ||\ - ARCH_SPEAR3XX ||\ - ARCH_SPEAR6XX ||\ - ARCH_TEGRA ||\ - ARCH_U8500 ||\ - ARCH_VERSATILE ||\ - ARCH_W90X900 - default 0x08008000 if ARCH_MX1 ||\ - ARCH_SHARK - default 0x10008000 if ARCH_MSM ||\ - ARCH_OMAP1 ||\ - ARCH_RPC - default 0x20008000 if ARCH_S5P6440 ||\ - ARCH_S5P6442 ||\ - ARCH_S5PC100 ||\ - ARCH_S5PV210 - default 0x30008000 if ARCH_S3C2410 ||\ - ARCH_S3C2400 ||\ - ARCH_S3C2412 ||\ - ARCH_S3C2416 ||\ - ARCH_S3C2440 ||\ - ARCH_S3C2443 - default 0x40008000 if ARCH_STMP378X ||\ - ARCH_STMP37XX ||\ - ARCH_SH7372 ||\ - ARCH_SH7377 ||\ - ARCH_S5PV310 - default 0x50008000 if ARCH_S3C64XX ||\ - ARCH_SH7367 - default 0x60008000 if ARCH_VEXPRESS - default 0x80008000 if ARCH_MX25 ||\ - ARCH_MX3 ||\ - ARCH_NETX ||\ - ARCH_OMAP2PLUS ||\ - ARCH_PNX4008 - default 0x90008000 if ARCH_MX5 ||\ - ARCH_MX91231 - default 0xa0008000 if ARCH_IOP32X ||\ - ARCH_PXA ||\ - MACH_MX27 - default 0xc0008000 if ARCH_LH7A40X ||\ - MACH_MX21 - default 0xf0008000 if ARCH_AAEC2000 ||\ - ARCH_L7200 - default 0xc0028000 if ARCH_CLPS711X - default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45) - default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45) - default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX - default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX - default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET - default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET - default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET - default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET - default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET - default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP - default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP - default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET - default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET - default 0xc0208000 if ARCH_SA1100 && SA1111 - default
0xc0008000 if ARCH_SA1100 && !SA1111 - default 0x30108000 if ARCH_S3C2410 && PM_H1940 - default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM - default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM - help - ZRELADDR is the physical address where the decompressed kernel - image will be placed. ZRELADDR has to be specified when the - assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is - selected. - endmenu menu "CPU Power Management" diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index f705213caa88..4a590f4113e2 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -14,16 +14,18 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh ifneq ($(MACHINE),) --include $(srctree)/$(MACHINE)/Makefile.boot +include $(srctree)/$(MACHINE)/Makefile.boot endif # Note: the following conditions must always be true: +# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) # PARAMS_PHYS must be within 4MB of ZRELADDR # INITRD_PHYS must be in RAM +ZRELADDR := $(zreladdr-y) PARAMS_PHYS := $(params_phys-y) INITRD_PHYS := $(initrd_phys-y) -export INITRD_PHYS PARAMS_PHYS +export ZRELADDR INITRD_PHYS PARAMS_PHYS targets := Image zImage xipImage bootpImage uImage @@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE $@ ifeq ($(CONFIG_ZBOOT_ROM),y) $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) else -$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR) +$(obj)/uImage: LOADADDR=$(ZRELADDR) endif ifeq ($(CONFIG_THUMB2_KERNEL),y) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 68775e33476c..b23f6bc46cfa 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -79,6 +79,10 @@ endif EXTRA_CFLAGS := -fpic -fno-builtin EXTRA_AFLAGS := -Wa,-march=all +# Supply ZRELADDR to the decompressor via a linker symbol. +ifneq ($(CONFIG_AUTO_ZRELADDR),y) +LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR) +endif ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) LDFLAGS_vmlinux += --be8 endif diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 6af9907c3b5c..6825c34646d4 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -177,7 +177,7 @@ not_angel: and r4, pc, #0xf8000000 add r4, r4, #TEXT_OFFSET #else - ldr r4, =CONFIG_ZRELADDR + ldr r4, =zreladdr #endif subs r0, r0, r1 @ calculate the delta offset diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 6c0913562455..7974baacafce 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev) return 0; } +int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", + __func__, dma_addr, size); + return (dev->bus == &pci_bus_type) && + ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); +} + int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) { it8152_io.start = IT8152_IO_BASE + 0x12000; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index c226fe10553e..c568da7dcae4 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *); * DMA access and 1 if the buffer needs to be bounced. * */ -#ifdef CONFIG_SA1111 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); -#else -static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr, - size_t size) -{ - return 0; -} -#endif /* * The DMA API, implemented by dmabounce.c. 
See below for descriptions. diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 48837e6d8887..b5799a3b7117 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -17,7 +17,7 @@ * counter interrupts are regular interrupts and not an NMI. This * means that when we receive the interrupt we can call * perf_event_do_pending() that handles all of the work with - * interrupts enabled. + * interrupts disabled. */ static inline void set_perf_event_pending(void) diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index d02cfb683487..c891eb76c0e3 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -393,6 +393,9 @@ #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) #define __NR_accept4 (__NR_SYSCALL_BASE+366) +#define __NR_fanotify_init (__NR_SYSCALL_BASE+367) +#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) +#define __NR_prlimit64 (__NR_SYSCALL_BASE+369) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index afeb71fa72cb..5c26eccef998 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -376,6 +376,9 @@ CALL(sys_perf_event_open) /* 365 */ CALL(sys_recvmmsg) CALL(sys_accept4) + CALL(sys_fanotify_init) + CALL(sys_fanotify_mark) + CALL(sys_prlimit64) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 417c392ddf1c..ecbb0288e5dd 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc, { struct hw_perf_event fake_event = event->hw; - if (event->pmu && event->pmu != &pmu) - return 0; + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; return armpmu->get_event_idx(cpuc, &fake_event) >= 0; } @@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num, /* * Handle the pending perf events. * - * Note: this call *must* be run with interrupts enabled. For - * platforms that can have the PMU interrupts raised as a PMI, this + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ perf_event_do_pending(); @@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) /* * Handle the pending perf events. * - * Note: this call *must* be run with interrupts enabled. For - * platforms that can have the PMU interrupts raised as a PMI, this + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this * will not work. 
*/ perf_event_do_pending(); diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 753c0d31a3d3..c67b47f1c0fd 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -121,8 +121,8 @@ static struct clk ssc1_clk = { .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, .type = CLK_TYPE_PERIPHERAL, }; -static struct clk tcb_clk = { - .name = "tcb_clk", +static struct clk tcb0_clk = { + .name = "tcb0_clk", .pmc_mask = 1 << AT91SAM9G45_ID_TCB, .type = CLK_TYPE_PERIPHERAL, }; @@ -192,6 +192,14 @@ static struct clk ohci_clk = { .parent = &uhphs_clk, }; +/* One additional fake clock for second TC block */ +static struct clk tcb1_clk = { + .name = "tcb1_clk", + .pmc_mask = 0, + .type = CLK_TYPE_PERIPHERAL, + .parent = &tcb0_clk, +}; + static struct clk *periph_clocks[] __initdata = { &pioA_clk, &pioB_clk, @@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = { &spi1_clk, &ssc0_clk, &ssc1_clk, - &tcb_clk, + &tcb0_clk, &pwm_clk, &tsc_clk, &dma_clk, @@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = { &mmc1_clk, // irq0 &ohci_clk, + &tcb1_clk, }; /* diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 809114d5a5a6..5e71ccd5e7d3 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = { .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, .flags = IORESOURCE_MEM, }, - [2] = { + [1] = { .start = AT91SAM9G45_ID_DMA, .end = AT91SAM9G45_ID_DMA, .flags = IORESOURCE_IRQ, @@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = { static void __init at91_add_device_tc(void) { /* this chip has one clock and irq for all six TC channels */ - at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); + at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); platform_device_register(&at91sam9g45_tcb0_device); - at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); + at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); platform_device_register(&at91sam9g45_tcb1_device); } #else diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index c4c8865d52d7..65eb0943194f 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = { .start = AT91_PIN_PC11, .end = AT91_PIN_PC11, .flags = IORESOURCE_IRQ + | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, } }; static struct dm9000_plat_data dm9000_platdata = { - .flags = DM9000_PLATF_16BITONLY, + .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM, }; static struct platform_device dm9000_device = { @@ -168,17 +169,6 @@ static struct at91_udc_data __initdata ek_udc_data = { /* - * MCI (SD/MMC) - */ -static struct at91_mmc_data __initdata ek_mmc_data = { - .wire4 = 1, -// .det_pin = ... not connected -// .wp_pin = ... not connected -// .vcc_pin = ... 
not connected -}; - - -/* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { @@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void) at91_add_device_nand(&ek_nand_data); } +/* + * SPI related devices + */ +#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE) /* * ADS7846 Touchscreen @@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = { #endif }; +#else /* CONFIG_SPI_ATMEL_* */ +/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */ + +/* + * MCI (SD/MMC) + * det_pin, wp_pin and vcc_pin are not connected + */ +static struct at91_mmc_data __initdata ek_mmc_data = { + .wire4 = 1, +}; + +#endif /* CONFIG_SPI_ATMEL_* */ + /* * LCD Controller diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index 7f7da439341f..7525cee3983f 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c @@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init); int __init clk_register(struct clk *clk) { if (clk_is_peripheral(clk)) { - clk->parent = &mck; + if (!clk->parent) + clk->parent = &mck; clk->mode = pmc_periph_mode; list_add_tail(&clk->node, &clocks); } diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 8bf3cec98cfa..4566bd1c8660 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void) clkdev_add_table(clocks, ARRAY_SIZE(clocks)); return 0; } -arch_initcall(ep93xx_clock_init); +postcore_initcall(ep93xx_clock_init); diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c index 91931dcb0689..4aaadc753d3e 100644 --- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c +++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c @@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = { * Add platform devices present on this baseboard and init * them from CPU side as far as required to use them later on */ -void __init eukrea_mbimxsd_baseboard_init(void) +void __init eukrea_mbimxsd25_baseboard_init(void) { if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, ARRAY_SIZE(eukrea_mbimxsd_pads))) diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c index a5f0174290b4..e064bb3d6919 100644 --- a/arch/arm/mach-mx25/mach-cpuimx25.c +++ b/arch/arm/mach-mx25/mach-cpuimx25.c @@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void) if (!otg_mode_host) mxc_register_device(&otg_udc_device, &otg_device_pdata); -#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD - eukrea_mbimxsd_baseboard_init(); +#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD + eukrea_mbimxsd25_baseboard_init(); #endif } diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c index d3af0fdf8475..7a62e744a8b0 100644 --- a/arch/arm/mach-mx3/clock-imx35.c +++ b/arch/arm/mach-mx3/clock-imx35.c @@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void) aad = &clk_consumer[(pdr0 >> 16) & 0xf]; if (aad->sel) - fref = fref * 2 / 3; + fref = fref * 3 / 4; return fref / aad->arm; } @@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk) { unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); struct arm_ahb_div *aad; - unsigned long fref = get_rate_mpll(); + unsigned long fref = get_rate_arm(); aad = &clk_consumer[(pdr0 >> 16) & 0xf]; @@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk) return get_rate_ahb(NULL) >> 1; } -static unsigned long get_3_3_div(unsigned long in) -{ - return (((in 
>> 3) & 0x7) + 1) * ((in & 0x7) + 1); -} - static unsigned long get_rate_uart(struct clk *clk) { unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); - unsigned long div = get_3_3_div(pdr4 >> 10); + unsigned long div = ((pdr4 >> 10) & 0x3f) + 1; if (pdr3 & (1 << 14)) return get_rate_arm() / div; @@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk) break; } - return rate / get_3_3_div(div); + return rate / (div + 1); } static unsigned long get_rate_mshc(struct clk *clk) @@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk) else rate = get_rate_ppll(); - return rate / get_3_3_div((pdr2 >> 16) & 0x3f); + return rate / (((pdr2 >> 16) & 0x3f) + 1); } static unsigned long get_rate_otg(struct clk *clk) @@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk) else rate = get_rate_ppll(); - return rate / get_3_3_div((pdr4 >> 22) & 0x3f); + return rate / (((pdr4 >> 22) & 0x3f) + 1); } static unsigned long get_rate_ipg_per(struct clk *clk) { unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); - unsigned long div1, div2; + unsigned long div; if (pdr0 & (1 << 26)) { - div1 = (pdr4 >> 19) & 0x7; - div2 = (pdr4 >> 16) & 0x7; - return get_rate_arm() / ((div1 + 1) * (div2 + 1)); + div = (pdr4 >> 16) & 0x3f; + return get_rate_arm() / (div + 1); } else { - div1 = (pdr0 >> 12) & 0x7; - return get_rate_ahb(NULL) / div1; + div = (pdr0 >> 12) & 0x7; + return get_rate_ahb(NULL) / (div + 1); } } +static unsigned long get_rate_hsp(struct clk *clk) +{ + unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03; + unsigned long fref = get_rate_mpll(); + + if (fref > 400 * 1000 * 1000) { + switch (hsp_podf) { + case 0: + return fref >> 2; + case 1: + return fref >> 3; + case 2: + return fref / 3; + } + } else { + switch (hsp_podf) { + case 0: + case 2: + return fref / 3; + case 1: + return fref / 6; + } + } + + return 0; +} + static int clk_cgr_enable(struct clk *clk) { u32 reg; @@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL); DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); -DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL); +DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL); DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); @@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = { int __init mx35_clocks_init() { - unsigned int ll = 0; + unsigned int cgr2 = 3 << 26, cgr3 = 0; #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) - ll = (3 << 16); + cgr2 |= 3 << 16; #endif clkdev_add_table(lookups, ARRAY_SIZE(lookups)); @@ -499,8 +520,20 @@ int __init mx35_clocks_init() __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), CCM_BASE + CCM_CGR1); - __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2); - __raw_writel(0, CCM_BASE + CCM_CGR3); + + /* + * Check if we came up in internal boot mode. If yes, we need some + * extra clocks turned on, otherwise the MX35 boot ROM code will + * hang after a watchdog reset. 
+ */ + if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) { + /* Additionally turn on UART1, SCC, and IIM clocks */ + cgr2 |= 3 << 16 | 3 << 4; + cgr3 |= 3 << 2; + } + + __raw_writel(cgr2, CCM_BASE + CCM_CGR2); + __raw_writel(cgr3, CCM_BASE + CCM_CGR3); mxc_timer_init(&gpt_clk, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c index 1dc5004df866..f8f15e3ac7a0 100644 --- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c +++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c @@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = { * Add platform devices present on this baseboard and init * them from CPU side as far as required to use them later on */ -void __init eukrea_mbimxsd_baseboard_init(void) +void __init eukrea_mbimxsd35_baseboard_init(void) { if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, ARRAY_SIZE(eukrea_mbimxsd_pads))) diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c index 9770a6a973be..2a4f8b781ba4 100644 --- a/arch/arm/mach-mx3/mach-cpuimx35.c +++ b/arch/arm/mach-mx3/mach-cpuimx35.c @@ -201,8 +201,8 @@ static void __init mxc_board_init(void) if (!otg_mode_host) mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); -#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD - eukrea_mbimxsd_baseboard_init(); +#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD + eukrea_mbimxsd35_baseboard_init(); #endif } diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c index 6af69def357f..57c10a9926cc 100644 --- a/arch/arm/mach-mx5/clock-mx51.c +++ b/arch/arm/mach-mx5/clock-mx51.c @@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk) { u32 reg; reg = __raw_readl(clk->enable_reg); - reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift); + reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift); __raw_writel(reg, clk->enable_reg); } diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c index 268a9bc6be8a..50d5939a78f1 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c @@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, return 0; } -static __init int pxa_cpufreq_init(struct cpufreq_policy *policy) +static int pxa_cpufreq_init(struct cpufreq_policy *policy) { int i; unsigned int freq; diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c index 27fa329d9a8b..0a0d0fe99220 100644 --- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c +++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c @@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, return 0; } -static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) +static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) { int ret = -EINVAL; diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h index 7139e0dc26d1..4e1287070d21 100644 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h @@ -71,10 +71,10 @@ #define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) #define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) #define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) -#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X) -#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X) #define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) #define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) +#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, 
DS04X) +#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X) /* KEYPAD */ #define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile index 5e16b4c69222..ae416fe7daf2 100644 --- a/arch/arm/mach-shmobile/Makefile +++ b/arch/arm/mach-shmobile/Makefile @@ -3,7 +3,7 @@ # # Common objects -obj-y := timer.o console.o clock.o +obj-y := timer.o console.o clock.o pm_runtime.o # CPU objects obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 23d472f9525e..95935c83c306 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -25,6 +25,7 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mfd/sh_mobile_sdhi.h> +#include <linux/mfd/tmio.h> #include <linux/mmc/host.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> @@ -39,6 +40,7 @@ #include <linux/sh_clk.h> #include <linux/gpio.h> #include <linux/input.h> +#include <linux/leds.h> #include <linux/input/sh_keysc.h> #include <linux/usb/r8a66597.h> @@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_ocr_mask = MMC_VDD_165_195, + .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, }; static struct resource sdhi1_resources[] = { @@ -558,7 +561,7 @@ static struct resource fsi_resources[] = { static struct platform_device fsi_device = { .name = "sh_fsi2", - .id = 0, + .id = -1, .num_resources = ARRAY_SIZE(fsi_resources), .resource = fsi_resources, .dev = { @@ -650,7 +653,44 @@ static struct platform_device hdmi_device = { }, }; +static struct gpio_led ap4evb_leds[] = { + { + .name = "led4", + .gpio = GPIO_PORT185, + .default_state = LEDS_GPIO_DEFSTATE_ON, + }, + { + .name = "led2", + .gpio = GPIO_PORT186, + .default_state = LEDS_GPIO_DEFSTATE_ON, + }, + { + .name = "led3", + .gpio = GPIO_PORT187, + .default_state = LEDS_GPIO_DEFSTATE_ON, + }, + { + .name = "led1", + .gpio = GPIO_PORT188, + .default_state = LEDS_GPIO_DEFSTATE_ON, + } +}; + +static struct gpio_led_platform_data ap4evb_leds_pdata = { + .num_leds = ARRAY_SIZE(ap4evb_leds), + .leds = ap4evb_leds, +}; + +static struct platform_device leds_device = { + .name = "leds-gpio", + .id = 0, + .dev = { + .platform_data = &ap4evb_leds_pdata, + }, +}; + static struct platform_device *ap4evb_devices[] __initdata = { + &leds_device, &nor_flash_device, &smc911x_device, &sdhi0_device, @@ -840,20 +880,6 @@ static void __init ap4evb_init(void) gpio_request(GPIO_FN_CS5A, NULL); gpio_request(GPIO_FN_IRQ6_39, NULL); - /* enable LED 1 - 4 */ - gpio_request(GPIO_PORT185, NULL); - gpio_request(GPIO_PORT186, NULL); - gpio_request(GPIO_PORT187, NULL); - gpio_request(GPIO_PORT188, NULL); - gpio_direction_output(GPIO_PORT185, 1); - gpio_direction_output(GPIO_PORT186, 1); - gpio_direction_output(GPIO_PORT187, 1); - gpio_direction_output(GPIO_PORT188, 1); - gpio_export(GPIO_PORT185, 0); - gpio_export(GPIO_PORT186, 0); - gpio_export(GPIO_PORT187, 0); - gpio_export(GPIO_PORT188, 0); - /* enable Debug switch (S6) */ gpio_request(GPIO_PORT32, NULL); gpio_request(GPIO_PORT33, NULL); diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index fb4e9b1d788e..759468992ad2 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = { struct clk 
pllc2_clk = { .ops = &pllc2_clk_ops, - .flags = CLK_ENABLE_ON_INIT, .parent = &extal1_div2_clk, .freq_table = pllc2_freq_table, .parent_table = pllc2_parent, @@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = { enum { MSTP001, MSTP131, MSTP130, - MSTP129, MSTP128, + MSTP129, MSTP128, MSTP127, MSTP126, MSTP118, MSTP117, MSTP116, MSTP106, MSTP101, MSTP100, MSTP223, @@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ + [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */ + [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */ [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ @@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ - [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */ + [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */ [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ @@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ + CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */ + CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */ CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c index b7c705a213a2..6b7c7c42bc8f 100644 --- a/arch/arm/mach-shmobile/clock.c +++ b/arch/arm/mach-shmobile/clock.c @@ -1,8 +1,10 @@ /* - * SH-Mobile Timer + * SH-Mobile Clock Framework * * Copyright (C) 2010 Magnus Damm * + * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c. + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c new file mode 100644 index 000000000000..94912d3944d3 --- /dev/null +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -0,0 +1,169 @@ +/* + * arch/arm/mach-shmobile/pm_runtime.c + * + * Runtime PM support code for SuperH Mobile ARM + * + * Copyright (C) 2009-2010 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/sh_clk.h> +#include <linux/bitmap.h> + +#ifdef CONFIG_PM_RUNTIME +#define BIT_ONCE 0 +#define BIT_ACTIVE 1 +#define BIT_CLK_ENABLED 2 + +struct pm_runtime_data { + unsigned long flags; + struct clk *clk; +}; + +static void __devres_release(struct device *dev, void *res) +{ + struct pm_runtime_data *prd = res; + + dev_dbg(dev, "__devres_release()\n"); + + if (test_bit(BIT_CLK_ENABLED, &prd->flags)) + clk_disable(prd->clk); + + if (test_bit(BIT_ACTIVE, &prd->flags)) + clk_put(prd->clk); +} + +static struct pm_runtime_data *__to_prd(struct device *dev) +{ + return devres_find(dev, __devres_release, NULL, NULL); +} + +static void platform_pm_runtime_init(struct device *dev, + struct pm_runtime_data *prd) +{ + if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { + prd->clk = clk_get(dev, NULL); + if (!IS_ERR(prd->clk)) { + set_bit(BIT_ACTIVE, &prd->flags); + dev_info(dev, "clocks managed by runtime pm\n"); + } + } +} + +static void platform_pm_runtime_bug(struct device *dev, + struct pm_runtime_data *prd) +{ + if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) + dev_err(dev, "runtime pm suspend before resume\n"); +} + +int platform_pm_runtime_suspend(struct device *dev) +{ + struct pm_runtime_data *prd = __to_prd(dev); + + dev_dbg(dev, "platform_pm_runtime_suspend()\n"); + + platform_pm_runtime_bug(dev, prd); + + if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { + clk_disable(prd->clk); + clear_bit(BIT_CLK_ENABLED, &prd->flags); + } + + return 0; +} + +int platform_pm_runtime_resume(struct device *dev) +{ + struct pm_runtime_data *prd = __to_prd(dev); + + dev_dbg(dev, "platform_pm_runtime_resume()\n"); + + platform_pm_runtime_init(dev, prd); + + if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { + clk_enable(prd->clk); + set_bit(BIT_CLK_ENABLED, &prd->flags); + } + + return 0; +} + +int platform_pm_runtime_idle(struct device *dev) +{ + /* suspend synchronously to disable clocks immediately */ + return pm_runtime_suspend(dev); +} + +static int platform_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pm_runtime_data *prd; + + dev_dbg(dev, "platform_bus_notify() %ld !\n", action); + + if (action == BUS_NOTIFY_BIND_DRIVER) { + prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); + if (prd) + devres_add(dev, prd); + else + dev_err(dev, "unable to alloc memory for runtime pm\n"); + } + + return 0; +} + +#else /* CONFIG_PM_RUNTIME */ + +static int platform_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct clk *clk; + + dev_dbg(dev, "platform_bus_notify() %ld !\n", action); + + switch (action) { + case BUS_NOTIFY_BIND_DRIVER: + clk = clk_get(dev, NULL); + if (!IS_ERR(clk)) { + clk_enable(clk); + clk_put(clk); + dev_info(dev, "runtime pm disabled, clock forced on\n"); + } + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + clk = clk_get(dev, NULL); + if (!IS_ERR(clk)) { + clk_disable(clk); + clk_put(clk); + dev_info(dev, "runtime pm disabled, clock forced off\n"); + } + break; + } + + return 0; +} + +#endif /* CONFIG_PM_RUNTIME */ + +static struct notifier_block platform_bus_notifier = { + .notifier_call = platform_bus_notify +}; + +static int __init sh_pm_runtime_init(void) +{ + bus_register_notifier(&platform_bus_type, &platform_bus_notifier); + return 0; +} 
+core_initcall(sh_pm_runtime_init); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 33c3f570aaa0..a0a2928ae4dd 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -398,7 +398,7 @@ config CPU_V6 # ARMv6k config CPU_32v6K bool "Support ARM V6K processor extensions" if !SMP - depends on CPU_V6 + depends on CPU_V6 || CPU_V7 default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) help Say Y here if your ARMv6 processor supports the 'K' extension. diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c704eed63c5d..4bc43e535d3b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) } } while (size -= PAGE_SIZE); + dsb(); + return (void *)c->vm_start; } return NULL; diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig index 0527e65318f4..6785db4179b8 100644 --- a/arch/arm/plat-mxc/Kconfig +++ b/arch/arm/plat-mxc/Kconfig @@ -43,6 +43,7 @@ config ARCH_MXC91231 config ARCH_MX5 bool "MX5-based" select CPU_V7 + select ARM_L1_CACHE_SHIFT_6 help This enables support for systems based on the Freescale i.MX51 family diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h index 634e3f4c454d..656acb45d434 100644 --- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h +++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h @@ -37,9 +37,9 @@ * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 */ -extern void eukrea_mbimx25_baseboard_init(void); +extern void eukrea_mbimxsd25_baseboard_init(void); extern void eukrea_mbimx27_baseboard_init(void); -extern void eukrea_mbimx35_baseboard_init(void); +extern void eukrea_mbimxsd35_baseboard_init(void); extern void eukrea_mbimx51_baseboard_init(void); #endif diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c index b3da9aad4295..3703ab28257f 100644 --- a/arch/arm/plat-mxc/tzic.c +++ b/arch/arm/plat-mxc/tzic.c @@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle) return -EAGAIN; for (i = 0; i < 4; i++) { - v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i]; - __raw_writel(v, TZIC_WAKEUP0(i)); + v = is_idle ? 
__raw_readl(tzic_base + TZIC_ENSET0(i)) : + wakeup_intr[i]; + __raw_writel(v, tzic_base + TZIC_WAKEUP0(i)); } return 0; diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c index 0732c6c8d511..ef32686feef9 100644 --- a/arch/arm/plat-pxa/pwm.c +++ b/arch/arm/plat-pxa/pwm.c @@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm) static int __devinit pwm_probe(struct platform_device *pdev) { - struct platform_device_id *id = platform_get_device_id(pdev); + const struct platform_device_id *id = platform_get_device_id(pdev); struct pwm_device *pwm, *secondary = NULL; struct resource *r; int ret = 0; diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 48cbdcb6bbd4..55590a4d87c9 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Mon Jul 12 21:10:14 2010 +# Last update: Thu Sep 9 22:43:01 2010 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634 gw2388 MACH_GW2388 GW2388 2635 jadecpu MACH_JADECPU JADECPU 2636 carlisle MACH_CARLISLE CARLISLE 2637 -lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638 +lux_sf9 MACH_LUX_SF9 LUX_SF9 2638 nemid_tb MACH_NEMID_TB NEMID_TB 2639 terrier MACH_TERRIER TERRIER 2640 turbot MACH_TURBOT TURBOT 2641 @@ -2950,3 +2950,97 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963 netviz MACH_NETVIZ NETVIZ 2964 flexibity MACH_FLEXIBITY FLEXIBITY 2965 wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 +lpc24xx MACH_LPC24XX LPC24XX 2967 +spica MACH_SPICA SPICA 2968 +gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969 +bipnet MACH_BIPNET BIPNET 2970 +overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971 +davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972 +pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973 +ptx7545 MACH_PTX7545 PTX7545 2974 +tm_efdc MACH_TM_EFDC TM_EFDC 2975 +omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977 +flyer MACH_FLYER FLYER 2978 +tornado3240 MACH_TORNADO3240 TORNADO3240 2979 +soli_01 MACH_SOLI_01 SOLI_01 2980 +omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981 +helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982 +netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983 +ssc MACH_SSC SSC 2984 +premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985 +wasabi MACH_WASABI WASABI 2986 +vivow MACH_VIVOW VIVOW 2987 +mx50_rdp MACH_MX50_RDP MX50_RDP 2988 +universal MACH_UNIVERSAL UNIVERSAL 2989 +real6410 MACH_REAL6410 REAL6410 2990 +spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991 +ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992 +omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993 +thebe MACH_THEBE THEBE 2994 +rv082 MACH_RV082 RV082 2995 +armlguest MACH_ARMLGUEST ARMLGUEST 2996 +tjinc1000 MACH_TJINC1000 TJINC1000 2997 +dockstar MACH_DOCKSTAR DOCKSTAR 2998 +ax8008 MACH_AX8008 AX8008 2999 +gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000 +pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 +ea20 MACH_EA20 EA20 3002 +awm2 MACH_AWM2 AWM2 3003 +ti8148evm MACH_TI8148EVM TI8148EVM 3004 +tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 +linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 +tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 +rubys MACH_RUBYS RUBYS 3008 +aquarius MACH_AQUARIUS AQUARIUS 3009 +mx53_ard MACH_MX53_ARD MX53_ARD 3010 +mx53_smd MACH_MX53_SMD MX53_SMD 3011 +lswxl MACH_LSWXL LSWXL 3012 +dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013 +sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014 +jocpu550 MACH_JOCPU550 JOCPU550 3015 
+msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016 +msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017 +yanomami MACH_YANOMAMI YANOMAMI 3018 +gta04 MACH_GTA04 GTA04 3019 +cm_a510 MACH_CM_A510 CM_A510 3020 +omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021 +kx33xx MACH_KX33XX KX33XX 3022 +ptx7510 MACH_PTX7510 PTX7510 3023 +top9000 MACH_TOP9000 TOP9000 3024 +teenote MACH_TEENOTE TEENOTE 3025 +ts3 MACH_TS3 TS3 3026 +a0 MACH_A0 A0 3027 +fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028 +fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029 +frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030 +remus MACH_REMUS REMUS 3031 +at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032 +at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033 +kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034 +oratisrouter MACH_ORATISROUTER ORATISROUTER 3035 +armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036 +spdm MACH_SPDM SPDM 3037 +gtib MACH_GTIB GTIB 3038 +dgm3240 MACH_DGM3240 DGM3240 3039 +atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040 +htcmega MACH_HTCMEGA HTCMEGA 3041 +tricorder MACH_TRICORDER TRICORDER 3042 +tx28 MACH_TX28 TX28 3043 +bstbrd MACH_BSTBRD BSTBRD 3044 +pwb3090 MACH_PWB3090 PWB3090 3045 +idea6410 MACH_IDEA6410 IDEA6410 3046 +qbc9263 MACH_QBC9263 QBC9263 3047 +borabora MACH_BORABORA BORABORA 3048 +valdez MACH_VALDEZ VALDEZ 3049 +ls9g20 MACH_LS9G20 LS9G20 3050 +mios_v1 MACH_MIOS_V1 MIOS_V1 3051 +s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052 +controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053 +tin307 MACH_TIN307 TIN307 3054 +tin510 MACH_TIN510 TIN510 3055 +bluecheese MACH_BLUECHEESE BLUECHEESE 3057 +tem3x30 MACH_TEM3X30 TEM3X30 3058 +harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059 +msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060 +spear900 MACH_SPEAR900 SPEAR900 3061 +pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062 diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h index a67aeed17d40..debc5ed96d6e 100644 --- a/arch/powerpc/include/asm/fsldma.h +++ b/arch/powerpc/include/asm/fsldma.h @@ -11,6 +11,7 @@ #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ #define __ARCH_POWERPC_ASM_FSLDMA_H__ +#include <linux/slab.h> #include <linux/dmaengine.h> /* diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 50794137d710..675c9e11ada5 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs) { siginfo_t info; - lock_kernel(); #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); #endif @@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs) #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); #endif - unlock_kernel(); } asmlinkage int diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index f8514e291e15..12b9f352595f 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir; - lock_kernel(); if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || (((insn >> 30) & 3) != 3)) goto kill_user; @@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) kill_user: user_mna_trap_fault(regs, insn); out: - unlock_kernel(); + ; } diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index f24d298bda29..b351770cbdd6 100644 --- a/arch/sparc/kernel/windows.c +++ 
b/arch/sparc/kernel/windows.c @@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) struct thread_info *tp = current_thread_info(); int window; - lock_kernel(); flush_user_windows(); for(window = 0; window < tp->w_saved; window++) { unsigned long sp = tp->rwbuf_stkptrs[window]; @@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) do_exit(SIGILL); } tp->w_saved = 0; - unlock_kernel(); } diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h index f35eb45d6576..c4191b3b7056 100644 --- a/arch/x86/include/asm/iomap.h +++ b/arch/x86/include/asm/iomap.h @@ -26,11 +26,11 @@ #include <asm/pgtable.h> #include <asm/tlbflush.h> -void * +void __iomem * iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); void -iounmap_atomic(void *kvaddr, enum km_type type); +iounmap_atomic(void __iomem *kvaddr, enum km_type type); int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 51cfd730ac5d..1f99ecfc48e1 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -152,9 +152,14 @@ struct x86_emulate_ops { struct operand { enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; unsigned int bytes; - unsigned long orig_val, *ptr; + union { + unsigned long orig_val; + u64 orig_val64; + }; + unsigned long *ptr; union { unsigned long val; + u64 val64; char valptr[sizeof(unsigned long) + 2]; }; }; diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 224392d8fe8c..5e975298fa81 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) err = -ENOMEM; goto out; } - if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { + if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) { kfree(b); err = -ENOMEM; goto out; @@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) #ifndef CONFIG_SMP cpumask_setall(b->cpus); #else - cpumask_copy(b->cpus, c->llc_shared_map); + cpumask_set_cpu(cpu, b->cpus); #endif per_cpu(threshold_banks, cpu)[bank] = b; diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index c2a8b26d4fea..d9368eeda309 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level) #ifdef CONFIG_SYSFS /* Add/Remove thermal_throttle interface for CPU device: */ -static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) +static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, + unsigned int cpu) { int err; - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); + struct cpuinfo_x86 *c = &cpu_data(cpu); err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); if (err) @@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: mutex_lock(&therm_cpu_lock); - err = thermal_throttle_add_dev(sys_dev); + err = thermal_throttle_add_dev(sys_dev, cpu); mutex_unlock(&therm_cpu_lock); WARN_ON(err); break; @@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void) #endif /* connect live CPUs to sysfs */ for_each_online_cpu(cpu) { - err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); + err = 
thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu); WARN_ON(err); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index f2da20fda02d..3efdf2870a35 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) /* * event overflow */ - handled = 1; + handled++; data.period = event->hw.last_period; if (!x86_perf_event_set_period(event)) @@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void) apic_write(APIC_LVTPC, APIC_DM_NMI); } +struct pmu_nmi_state { + unsigned int marked; + int handled; +}; + +static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi); + static int __kprobes perf_event_nmi_handler(struct notifier_block *self, unsigned long cmd, void *__args) { struct die_args *args = __args; - struct pt_regs *regs; + unsigned int this_nmi; + int handled; if (!atomic_read(&active_events)) return NOTIFY_DONE; @@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self, case DIE_NMI: case DIE_NMI_IPI: break; - + case DIE_NMIUNKNOWN: + this_nmi = percpu_read(irq_stat.__nmi_count); + if (this_nmi != __get_cpu_var(pmu_nmi).marked) + /* let the kernel handle the unknown nmi */ + return NOTIFY_DONE; + /* + * This one is a PMU back-to-back nmi. Two events + * trigger 'simultaneously' raising two back-to-back + * NMIs. If the first NMI handles both, the latter + * will be empty and daze the CPU. So, we drop it to + * avoid false-positive 'unknown nmi' messages. + */ + return NOTIFY_STOP; default: return NOTIFY_DONE; } - regs = args->regs; - apic_write(APIC_LVTPC, APIC_DM_NMI); - /* - * Can't rely on the handled return value to say it was our NMI, two - * events could trigger 'simultaneously' raising two back-to-back NMIs. - * - * If the first NMI handles both, the latter will be empty and daze - * the CPU. - */ - x86_pmu.handle_irq(regs); + + handled = x86_pmu.handle_irq(args->regs); + if (!handled) + return NOTIFY_DONE; + + this_nmi = percpu_read(irq_stat.__nmi_count); + if ((handled > 1) || + /* the next nmi could be a back-to-back nmi */ + ((__get_cpu_var(pmu_nmi).marked == this_nmi) && + (__get_cpu_var(pmu_nmi).handled > 1))) { + /* + * We could have two subsequent back-to-back nmis: The + * first handles more than one counter, the 2nd + * handles only one counter and the 3rd handles no + * counter. + * + * This is the 2nd nmi because the previous was + * handling more than one counter. We will mark the + * next (3rd) and then drop it if unhandled. 
+ */ + __get_cpu_var(pmu_nmi).marked = this_nmi + 1; + __get_cpu_var(pmu_nmi).handled = handled; + } return NOTIFY_STOP; } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index d8d86d014008..ee05c90012d2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) struct perf_sample_data data; struct cpu_hw_events *cpuc; int bit, loops; - u64 ack, status; + u64 status; + int handled = 0; perf_sample_data_init(&data, 0); @@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) loops = 0; again: + intel_pmu_ack_status(status); if (++loops > 100) { WARN_ONCE(1, "perfevents: irq loop stuck!\n"); perf_event_print_debug(); @@ -736,19 +738,22 @@ again: } inc_irq_stat(apic_perf_irqs); - ack = status; intel_pmu_lbr_read(); /* * PEBS overflow sets bit 62 in the global status register */ - if (__test_and_clear_bit(62, (unsigned long *)&status)) + if (__test_and_clear_bit(62, (unsigned long *)&status)) { + handled++; x86_pmu.drain_pebs(regs); + } for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; + handled++; + if (!test_bit(bit, cpuc->active_mask)) continue; @@ -761,8 +766,6 @@ again: x86_pmu_stop(event); } - intel_pmu_ack_status(ack); - /* * Repeat if there is more work to be done: */ @@ -772,7 +775,7 @@ again: done: intel_pmu_enable_all(0); - return 1; + return handled; } static struct event_constraint * diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 7e578e9cc58b..b560db3305be 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) inc_irq_stat(apic_perf_irqs); } - return handled > 0; + return handled; } /* diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index a874495b3673..e2a595257390 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void) /* Copy kernel address range */ clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min_t(unsigned long, KERNEL_PGD_PTRS, - KERNEL_PGD_BOUNDARY)); + KERNEL_PGD_PTRS); /* Initialize low mappings */ clone_pgd_range(trampoline_pg_dir, diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index d632934cb638..26a863a9c2a8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -655,7 +655,7 @@ void restore_sched_clock_state(void) local_irq_save(flags); - get_cpu_var(cyc2ns_offset) = 0; + __get_cpu_var(cyc2ns_offset) = 0; offset = cyc2ns_suspend - sched_clock(); for_each_possible_cpu(cpu) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b38bd8b92aa6..66ca98aafdd6 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { struct decode_cache *c = &ctxt->decode; - u64 old = c->dst.orig_val; + u64 old = c->dst.orig_val64; if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { - c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { - c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | - (u32) c->regs[VCPU_REGS_RBX]; + c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 
32) | + (u32) c->regs[VCPU_REGS_RBX]; ctxt->eflags |= EFLG_ZF; } @@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) c->src.valptr, c->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; - c->src.orig_val = c->src.val; + c->src.orig_val64 = c->src.val64; } if (c->src2.type == OP_MEM) { diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 8d10c063d7f2..4b7b73ce2098 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s) if (!found) found = s->kvm->bsp_vcpu; + if (!found) + return; + kvm_vcpu_kick(found); } } diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index ffed06871c5c..63c314502993 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -43,7 +43,6 @@ struct kvm_kpic_state { u8 irr; /* interrupt request register */ u8 imr; /* interrupt mask register */ u8 isr; /* interrupt service register */ - u8 isr_ack; /* interrupt ack detection */ u8 priority_add; /* highest irq priority */ u8 irq_base; u8 read_reg_select; @@ -56,6 +55,7 @@ struct kvm_kpic_state { u8 init4; /* true if 4 byte init */ u8 elcr; /* PIIX edge/trigger selection */ u8 elcr_mask; + u8 isr_ack; /* interrupt ack detection */ struct kvm_pic *pics_state; }; diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index 84e236ce76ba..72fc70cf6184 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) /* * Map 'pfn' using fixed map 'type' and protections 'prot' */ -void * +void __iomem * iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) { /* @@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) prot = PAGE_KERNEL_UC_MINUS; - return kmap_atomic_prot_pfn(pfn, type, prot); + return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot); } EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); void -iounmap_atomic(void *kvaddr, enum km_type type) +iounmap_atomic(void __iomem *kvaddr, enum km_type type) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index f6b48f6c5951..cfe4faabb0f6 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -568,8 +568,13 @@ static int __init init_sysfs(void) int error; error = sysdev_class_register(&oprofile_sysclass); - if (!error) - error = sysdev_register(&device_oprofile); + if (error) + return error; + + error = sysdev_register(&device_oprofile); + if (error) + sysdev_class_unregister(&oprofile_sysclass); + return error; } @@ -580,8 +585,10 @@ static void exit_sysfs(void) } #else -#define init_sysfs() do { } while (0) -#define exit_sysfs() do { } while (0) + +static inline int init_sysfs(void) { return 0; } +static inline void exit_sysfs(void) { } + #endif /* CONFIG_PM */ static int __init p4_init(char **cpu_type) @@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops) char *cpu_type = NULL; int ret = 0; + using_nmi = 0; + if (!cpu_has_apic) return -ENODEV; @@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops) mux_init(ops); - init_sysfs(); + ret = init_sysfs(); + if (ret) + return ret; + using_nmi = 1; printk(KERN_INFO "oprofile: using NMI interrupt.\n"); return 0; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c 
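
The oprofile hunk just above replaces the "#define init_sysfs() do { } while (0)" stubs with static inline functions. The reason is visible in op_nmi_init(): the call site now reads "ret = init_sysfs();", and a statement-shaped macro has no value to assign, so !CONFIG_PM builds would fail to compile. A minimal standalone sketch of the pattern (illustration only, not the driver's actual code):

#ifdef CONFIG_PM
int init_sysfs(void);   /* real version registers the sysdev objects */
void exit_sysfs(void);
#else
/* Inline stubs keep the same signatures, so every caller compiles and
 * type-checks identically in both configurations; the compiler simply
 * discards the empty bodies. */
static inline int init_sysfs(void) { return 0; }
static inline void exit_sysfs(void) { }
#endif

static int example_caller(void)
{
	int ret = init_sysfs();	/* valid whether or not CONFIG_PM is set */
	if (ret)
		return ret;
	return 0;
}
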
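In the same vein, the perf_event.c change earlier in this series swallows a DIE_NMIUNKNOWN only when the previous PMU NMI predicted an empty back-to-back NMI. A condensed sketch of that bookkeeping (per-CPU state as in the hunk; notifier registration and the real PMU handler are omitted, and nmi_count stands in for irq_stat.__nmi_count):

struct pmu_nmi_state {
	unsigned int marked;	/* NMI number we expect to be empty */
	int handled;		/* counters serviced by the previous NMI */
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static int pmu_nmi_account(unsigned int nmi_count, int handled)
{
	struct pmu_nmi_state *s = &__get_cpu_var(pmu_nmi);

	if (!handled)
		return NOTIFY_DONE;	/* not our NMI */

	/* If this NMI serviced more than one counter, or it is itself the
	 * second of a back-to-back pair that did, the next NMI may arrive
	 * with nothing left to do. Mark that number so an unknown-NMI
	 * report for it can be dropped instead of dazing the CPU. */
	if (handled > 1 ||
	    (s->marked == nmi_count && s->handled > 1)) {
		s->marked = nmi_count + 1;
		s->handled = handled;
	}
	return NOTIFY_STOP;
}
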
index a6809645d212..2fef1ef931a0 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) /* Currently we do not support hierarchy deeper than two level (0,1) */ if (parent != cgroup->top_cgroup) - return ERR_PTR(-EINVAL); + return ERR_PTR(-EPERM); blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); if (!blkcg) diff --git a/block/blk-core.c b/block/blk-core.c index ee1a1e7e63cc..32a1c123dfb3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio) int el_ret; unsigned int bytes = bio->bi_size; const unsigned short prio = bio_prio(bio); - const bool sync = (bio->bi_rw & REQ_SYNC); - const bool unplug = (bio->bi_rw & REQ_UNPLUG); - const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; + const bool sync = !!(bio->bi_rw & REQ_SYNC); + const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); + const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; int rw_flags; if ((bio->bi_rw & REQ_HARDBARRIER) && diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 001ab18078f5..0749b89c6885 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk) kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); + kobject_put(&dev->kobj); return ret; } diff --git a/block/blk.h b/block/blk.h index 6e7dc87141e4..d6b911ac002c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) static inline int blk_cpu_to_group(int cpu) { + int group = NR_CPUS; #ifdef CONFIG_SCHED_MC const struct cpumask *mask = cpu_coregroup_mask(cpu); - return cpumask_first(mask); + group = cpumask_first(mask); #elif defined(CONFIG_SCHED_SMT) - return cpumask_first(topology_thread_cpumask(cpu)); + group = cpumask_first(topology_thread_cpumask(cpu)); #else return cpu; #endif + if (likely(group < NR_CPUS)) + return group; + return cpu; } /* diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index eb4086f7dfef..f65c6f01c475 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; static int cfq_slice_idle = HZ / 125; +static int cfq_group_idle = HZ / 125; static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ static const int cfq_hist_divisor = 4; @@ -147,6 +148,8 @@ struct cfq_queue { struct cfq_queue *new_cfqq; struct cfq_group *cfqg; struct cfq_group *orig_cfqg; + /* Number of sectors dispatched from queue in single dispatch round */ + unsigned long nr_sectors; }; /* @@ -198,6 +201,8 @@ struct cfq_group { struct hlist_node cfqd_node; atomic_t ref; #endif + /* number of requests that are on the dispatch list or inside driver */ + int dispatched; }; /* @@ -271,6 +276,7 @@ struct cfq_data { unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; + unsigned int cfq_group_idle; unsigned int cfq_latency; unsigned int cfq_group_isolation; @@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy); &cfqg->service_trees[i][j]: NULL) \ +static inline bool iops_mode(struct cfq_data *cfqd) +{ + /* + * If we are not idling on queues and it is a NCQ drive, parallel + * execution of requests is on and measuring time is not possible + * in most of the cases until and unless we drive shallower queue + * depths and that becomes a performance 
bottleneck. In such cases + * switch to start providing fairness in terms of number of IOs. + */ + if (!cfqd->cfq_slice_idle && cfqd->hw_tag) + return true; + else + return false; +} + static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) { if (cfq_class_idle(cfqq)) @@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) slice_used = cfqq->allocated_slice; } - cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used); return slice_used; } @@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, struct cfq_queue *cfqq) { struct cfq_rb_root *st = &cfqd->grp_service_tree; - unsigned int used_sl, charge_sl; + unsigned int used_sl, charge; int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) - cfqg->service_tree_idle.count; BUG_ON(nr_sync < 0); - used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); + used_sl = charge = cfq_cfqq_slice_usage(cfqq); - if (!cfq_cfqq_sync(cfqq) && !nr_sync) - charge_sl = cfqq->allocated_slice; + if (iops_mode(cfqd)) + charge = cfqq->slice_dispatch; + else if (!cfq_cfqq_sync(cfqq) && !nr_sync) + charge = cfqq->allocated_slice; /* Can't update vdisktime while group is on service tree */ cfq_rb_erase(&cfqg->rb_node, st); - cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); + cfqg->vdisktime += cfq_scale_slice(charge, cfqg); __cfq_group_service_tree_add(st, cfqg); /* This group is being expired. Save the context */ @@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, st->min_vdisktime); + cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" + " sect=%u", used_sl, cfqq->slice_dispatch, charge, + iops_mode(cfqd), cfqq->nr_sectors); cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); cfq_blkiocg_set_start_empty_time(&cfqg->blkg); } @@ -1587,6 +1612,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, cfqq->allocated_slice = 0; cfqq->slice_end = 0; cfqq->slice_dispatch = 0; + cfqq->nr_sectors = 0; cfq_clear_cfqq_wait_request(cfqq); cfq_clear_cfqq_must_dispatch(cfqq); @@ -1839,6 +1865,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) BUG_ON(!service_tree); BUG_ON(!service_tree->count); + if (!cfqd->cfq_slice_idle) + return false; + /* We never do for idle class queues. */ if (prio == IDLE_WORKLOAD) return false; @@ -1863,7 +1892,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) { struct cfq_queue *cfqq = cfqd->active_queue; struct cfq_io_context *cic; - unsigned long sl; + unsigned long sl, group_idle = 0; /* * SSD device without seek penalty, disable idling. But only do so @@ -1879,8 +1908,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) /* * idle is disabled, either manually or by past process history */ - if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) - return; + if (!cfq_should_idle(cfqd, cfqq)) { + /* no queue idling. 
Check for group idling */ + if (cfqd->cfq_group_idle) + group_idle = cfqd->cfq_group_idle; + else + return; + } /* * still active requests from this queue, don't idle @@ -1907,13 +1941,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) return; } + /* There are other queues in the group, don't do group idle */ + if (group_idle && cfqq->cfqg->nr_cfqq > 1) + return; + cfq_mark_cfqq_wait_request(cfqq); - sl = cfqd->cfq_slice_idle; + if (group_idle) + sl = cfqd->cfq_group_idle; + else + sl = cfqd->cfq_slice_idle; mod_timer(&cfqd->idle_slice_timer, jiffies + sl); cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); - cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); + cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, + group_idle ? 1 : 0); } /* @@ -1929,9 +1971,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); cfq_remove_request(rq); cfqq->dispatched++; + (RQ_CFQG(rq))->dispatched++; elv_dispatch_sort(q, rq); cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; + cfqq->nr_sectors += blk_rq_sectors(rq); cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), rq_data_dir(rq), rq_is_sync(rq)); } @@ -2198,7 +2242,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) cfqq = NULL; goto keep_queue; } else - goto expire; + goto check_group_idle; } /* @@ -2226,8 +2270,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) * flight or is idling for a new request, allow either of these * conditions to happen (or time out) before selecting a new queue. */ - if (timer_pending(&cfqd->idle_slice_timer) || - (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { + if (timer_pending(&cfqd->idle_slice_timer)) { + cfqq = NULL; + goto keep_queue; + } + + if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } + + /* + * If group idle is enabled and there are requests dispatched from + * this group, wait for requests to complete. + */ +check_group_idle: + if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 + && cfqq->cfqg->dispatched) { cfqq = NULL; goto keep_queue; } @@ -3375,6 +3434,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) WARN_ON(!cfqq->dispatched); cfqd->rq_in_driver--; cfqq->dispatched--; + (RQ_CFQG(rq))->dispatched--; cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq), rq_io_start_time_ns(rq), rq_data_dir(rq), rq_is_sync(rq)); @@ -3404,7 +3464,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) * the queue. 
*/ if (cfq_should_wait_busy(cfqd, cfqq)) { - cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; + unsigned long extend_sl = cfqd->cfq_slice_idle; + if (!cfqd->cfq_slice_idle) + extend_sl = cfqd->cfq_group_idle; + cfqq->slice_end = jiffies + extend_sl; cfq_mark_cfqq_wait_busy(cfqq); cfq_log_cfqq(cfqd, cfqq, "will busy wait"); } @@ -3850,6 +3913,7 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->cfq_slice[1] = cfq_slice_sync; cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; + cfqd->cfq_group_idle = cfq_group_idle; cfqd->cfq_latency = 1; cfqd->cfq_group_isolation = 0; cfqd->hw_tag = -1; @@ -3922,6 +3986,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); +SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); @@ -3954,6 +4019,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, @@ -3975,6 +4041,7 @@ static struct elv_fs_entry cfq_attrs[] = { CFQ_ATTR(slice_async), CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), + CFQ_ATTR(group_idle), CFQ_ATTR(low_latency), CFQ_ATTR(group_isolation), __ATTR_NULL @@ -4028,6 +4095,12 @@ static int __init cfq_init(void) if (!cfq_slice_idle) cfq_slice_idle = 1; +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (!cfq_group_idle) + cfq_group_idle = 1; +#else + cfq_group_idle = 0; +#endif if (cfq_slab_setup()) return -ENOMEM; diff --git a/block/elevator.c b/block/elevator.c index ec585c9554d3..205b09a5bd9e 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) { struct elevator_queue *old_elevator, *e; void *data; + int err; /* * Allocate new elevator */ e = elevator_alloc(q, new_e); if (!e) - return 0; + return -ENOMEM; data = elevator_init_queue(q, e); if (!data) { kobject_put(&e->kobj); - return 0; + return -ENOMEM; } /* @@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) __elv_unregister_queue(old_elevator); - if (elv_register_queue(q)) + err = elv_register_queue(q); + if (err) goto fail_register; /* @@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); - return 1; + return 0; fail_register: /* @@ -1071,17 +1073,19 @@ fail_register: queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); spin_unlock_irq(q->queue_lock); - return 0; + return err; } -ssize_t elv_iosched_store(struct request_queue *q, const char *name, - size_t count) +/* + * Switch this queue to the given IO scheduler. 
+ */ +int elevator_change(struct request_queue *q, const char *name) { char elevator_name[ELV_NAME_MAX]; struct elevator_type *e; if (!q->elevator) - return count; + return -ENXIO; strlcpy(elevator_name, name, sizeof(elevator_name)); e = elevator_get(strstrip(elevator_name)); @@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { elevator_put(e); - return count; + return 0; } - if (!elevator_switch(q, e)) - printk(KERN_ERR "elevator: switch to %s failed\n", - elevator_name); - return count; + return elevator_switch(q, e); +} +EXPORT_SYMBOL(elevator_change); + +ssize_t elv_iosched_store(struct request_queue *q, const char *name, + size_t count) +{ + int ret; + + if (!q->elevator) + return count; + + ret = elevator_change(q, name); + if (!ret) + return count; + + printk(KERN_ERR "elevator: switch to %s failed\n", name); + return ret; } ssize_t elv_iosched_show(struct request_queue *q, char *name) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 013727b20417..ff1c945fba98 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -253,6 +253,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ + { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 3971bc0a4838..d712675d0a96 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (CPT) */ { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (PBG) */ + { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (PBG) */ + { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, { } /* terminate list */ }; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 666850d31df2..68dc6785472f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1326,7 +1326,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, /* issue the first D2H Register FIS */ msecs = 0; now = jiffies; - if (time_after(now, deadline)) + if (time_after(deadline, now)) msecs = jiffies_to_msecs(deadline - now); tf.ctl |= ATA_SRST; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c035b3d041ee..932eaee50245 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, */ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) { + unsigned int ehi_flags = ATA_EHI_QUIET; int rc; /* @@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) */ ata_lpm_enable(host); - rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); + /* + * On some hardware, device fails to respond after spun down + * for suspend. As the device won't be used before being + * resumed, we don't need to touch the device. 
Ask EH to skip + * the usual stuff and proceed directly to suspend. + * + * http://thread.gmane.org/gmane.linux.ide/46764 + */ + if (mesg.event == PM_EVENT_SUSPEND) + ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; + + rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1); if (rc == 0) host->dev->power.power_state = mesg; return rc; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c9ae299b8342..e48302eae55f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link) if (link->flags & ATA_LFLAG_DISABLED) return 1; + /* skip if explicitly requested */ + if (ehc->i.flags & ATA_EHI_NO_RECOVERY) + return 1; + /* thaw frozen port and recover failed devices */ if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) return 0; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 3b82d8ef76f0..e30c537cce32 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) if (ioaddr->ctl_addr) iowrite8(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; + ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { @@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->device, ioaddr->device_addr); VPRINTK("device 0x%X\n", tf->device); } + + ata_wait_idle(ap); } EXPORT_SYMBOL_GPL(ata_sff_tf_load); @@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq) { - struct ata_eh_info *ehi = &ap->link.eh_info; + struct ata_link *link = qc->dev->link; + struct ata_eh_info *ehi = &link->eh_info; unsigned long flags = 0; int poll_next; @@ -1298,8 +1302,14 @@ fsm_start: } EXPORT_SYMBOL_GPL(ata_sff_hsm_move); -void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) +void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) { + struct ata_port *ap = link->ap; + + WARN_ON((ap->sff_pio_task_link != NULL) && + (ap->sff_pio_task_link != link)); + ap->sff_pio_task_link = link; + /* may fail if ata_sff_flush_pio_task() in progress */ queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, msecs_to_jiffies(delay)); @@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work) { struct ata_port *ap = container_of(work, struct ata_port, sff_pio_task.work); + struct ata_link *link = ap->sff_pio_task_link; struct ata_queued_cmd *qc; u8 status; int poll_next; + BUG_ON(ap->sff_pio_task_link == NULL); /* qc can be NULL if timeout occurred */ - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (!qc) + qc = ata_qc_from_tag(ap, link->active_tag); + if (!qc) { + ap->sff_pio_task_link = NULL; return; + } fsm_start: WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); @@ -1345,11 +1359,16 @@ fsm_start: msleep(2); status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { - ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); return; } } + /* + * hsm_move() may trigger another command to be processed. + * clean the link beforehand. 
+ */ + ap->sff_pio_task_link = NULL; /* move the HSM */ poll_next = ata_sff_hsm_move(ap, qc, status, 1); @@ -1376,6 +1395,7 @@ fsm_start: unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; /* Use polling pio if the LLD doesn't handle * interrupt driven pio and atapi CDB interrupt. @@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST_LAST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); break; @@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_WRITE) { /* PIO data out protocol */ ap->hsm_task_state = HSM_ST_FIRST; - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); /* always send first data block using the * ata_sff_pio_task() codepath. @@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); /* if polling, ata_sff_pio_task() handles the * rest. otherwise, interrupt handler takes @@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || (qc->tf.flags & ATA_TFLAG_POLLING)) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); break; default: @@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; /* defer PIO handling to sff_qc_issue */ if (!ata_is_dma(qc->tf.protocol)) @@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); break; default: diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index ba43f0f8c880..2215632e4b31 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline) struct pci_dev *pdev = to_pci_dev(ap->host->dev); /* Odd numbered device ids are the units with enable bits (the -R cards) */ - if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) + if ((pdev->device & 1) && + !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 5e659885de16..ac8d7d97e408 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) tf->lbam, tf->lbah); } + + ata_wait_idle(ap); } static int via_port_start(struct ata_port *ap) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 81982594a014..a9fd9709c262 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) } if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_sff_queue_pio_task(ap, 0); + ata_sff_queue_pio_task(link, 0); return 0; } diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 31064df1370a..6124c2fd2d33 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -297,6 +297,8 @@ static void 
enqueue_cmd_and_start_io(ctlr_info_t *h, spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; + if (h->Qdepth > h->maxQsinceinit) + h->maxQsinceinit = h->Qdepth; start_io(h); spin_unlock_irqrestore(&h->lock, flags); } @@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + /* The doorbell reset seems to cause lockups on some Smart + * Arrays (e.g. P410, P410i, maybe others). Until this is + * fixed or at least isolated, avoid the doorbell reset. + */ + use_doorbell = 0; + rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; @@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, h->scatter_list = kmalloc(h->max_commands * sizeof(struct scatterlist *), GFP_KERNEL); + if (!h->scatter_list) + goto clean4; + for (k = 0; k < h->nr_cmds; k++) { h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * h->maxsgentries, diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f3c636d23718..91797bbbe702 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { - bool barrier = (bio->bi_rw & REQ_HARDBARRIER); + bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); struct file *file = lo->lo_backing_file; if (barrier) { diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index b82c5ce5e9df..76fa3deaee84 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev) host->breq->queuedata = host; /* mflash is random device, thanx for the noop */ - elevator_exit(host->breq->elevator); - err = elevator_init(host->breq, "noop"); + err = elevator_change(host->breq, "noop"); if (err) { printk(KERN_ERR "%s:%d (elevator_init) fail\n", __func__, __LINE__); diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 2bbeaaea46e9..38df8c19e74c 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, case KIOCSOUND: if (!perm) goto eperm; - /* FIXME: This is an old broken API but we need to keep it - supported and somehow separate the historic advertised - tick rate from any real one */ + /* + * The use of PIT_TICK_RATE is historic, it used to be + * the platform-dependent CLOCK_TICK_RATE between 2.6.12 + * and 2.6.36, which was a minor but unfortunate ABI + * change. + */ if (arg) - arg = CLOCK_TICK_RATE / arg; + arg = PIT_TICK_RATE / arg; kd_mksound(arg, 0); break; @@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, */ ticks = HZ * ((arg >> 16) & 0xffff) / 1000; count = ticks ? 
(arg & 0xffff) : 0; - /* FIXME: This is an old broken API but we need to keep it - supported and somehow separate the historic advertised - tick rate from any real one */ if (count) - count = CLOCK_TICK_RATE / count; + count = PIT_TICK_RATE / count; kd_mksound(count, ticks); break; } diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c index b42f42ca70c3..823559ab0e24 100644 --- a/drivers/gpio/sx150x.c +++ b/drivers/gpio/sx150x.c @@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg) return err; } -static int sx150x_init_hw(struct sx150x_chip *chip, - struct sx150x_platform_data *pdata) +static int sx150x_reset(struct sx150x_chip *chip) { - int err = 0; + int err; - err = i2c_smbus_write_word_data(chip->client, + err = i2c_smbus_write_byte_data(chip->client, chip->dev_cfg->reg_reset, - 0x3412); + 0x12); if (err < 0) return err; + err = i2c_smbus_write_byte_data(chip->client, + chip->dev_cfg->reg_reset, + 0x34); + return err; +} + +static int sx150x_init_hw(struct sx150x_chip *chip, + struct sx150x_platform_data *pdata) +{ + int err = 0; + + if (pdata->reset_during_probe) { + err = sx150x_reset(chip); + if (err < 0) + return err; + } + err = sx150x_i2c_write(chip->client, chip->dev_cfg->reg_misc, 0x01); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 59457e83b011..744225ebb4b2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data) i915_seqno_passed(i915_get_gem_seqno(dev, &dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) { + bool missed_wakeup = false; + dev_priv->hangcheck_count = 0; /* Issue a wake-up to catch stuck h/w. */ - if (dev_priv->render_ring.waiting_gem_seqno | - dev_priv->bsd_ring.waiting_gem_seqno) { - DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); - if (dev_priv->render_ring.waiting_gem_seqno) - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - if (dev_priv->bsd_ring.waiting_gem_seqno) - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + if (dev_priv->render_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->render_ring.irq_queue)) { + DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + missed_wakeup = true; + } + + if (dev_priv->bsd_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { + DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + missed_wakeup = true; } + + if (missed_wakeup) + DRM_ERROR("Hangcheck timer elapsed... 
GPU idle, missed IRQ.\n"); return; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d094e9129223..4f5e15577e89 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2206,9 +2206,17 @@ #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) +#define WM1_LP_FBC_LP1_MASK (0xf<<20) +#define WM1_LP_FBC_LP1_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) +#define WM2_LP_ILK 0x4510c +#define WM2_LP_EN (1<<31) +#define WM3_LP_ILK 0x45110 +#define WM3_LP_EN (1<<31) +#define WM1S_LP_ILK 0x45120 +#define WM1S_LP_EN (1<<31) /* Memory latency timer register */ #define MLTR_ILK 0x11222 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 40cc5da264a9..19daead5b525 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2767,14 +2767,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, /* Don't promote wm_size to unsigned... */ if (wm_size > (long)wm->max_wm) wm_size = wm->max_wm; - if (wm_size <= 0) { + if (wm_size <= 0) wm_size = wm->default_wm; - DRM_ERROR("Insufficient FIFO for plane, expect flickering:" - " entries required = %ld, available = %lu.\n", - entries_required + wm->guard_size, - wm->fifo_size); - } - return wm_size; } @@ -3388,8 +3382,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, reg_value = I915_READ(WM1_LP_ILK); reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | WM1_LP_CURSOR_MASK); - reg_value |= WM1_LP_SR_EN | - (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | + reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; I915_WRITE(WM1_LP_ILK, reg_value); @@ -5675,6 +5668,9 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); } /* * Based on the document from hardware guys the following bits @@ -5696,8 +5692,7 @@ void intel_init_clock_gating(struct drm_device *dev) ILK_DPFC_DIS2 | ILK_CLK_FBC); } - if (IS_GEN6(dev)) - return; + return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -5758,11 +5753,9 @@ void intel_init_clock_gating(struct drm_device *dev) OUT_RING(MI_FLUSH); ADVANCE_LP_RING(); } - } else { + } else DRM_DEBUG_KMS("Failed to allocate render context." 
- "Disable RC6\n"); - return; - } + "Disable RC6\n"); } if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b819c1081147..4fbb0165b26f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -875,8 +875,6 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); intel_encoder->crtc_mask = (1 << 1); - if (IS_I965G(dev)) - intel_encoder->crtc_mask |= (1 << 0); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 7580f55e67e3..36e957532230 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c @@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), + AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted), + AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd), { NULL, } /* Laptop models without axis info (yet): * "NC6910" "HP Compaq 6910" diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 8f0caf7d4482..78fbe9ffe7f0 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -53,7 +53,7 @@ #define T3_MAX_PBL_SIZE 256 #define T3_MAX_RQ_SIZE 1024 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) -#define T3_MAX_CQ_DEPTH 262144 +#define T3_MAX_CQ_DEPTH 65536 #define T3_MAX_NUM_STAG (1<<15) #define T3_MAX_MR_SIZE 0x100000000ULL #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 443cea55daac..61e0efd4ccfb 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, static void nes_retrans_expired(struct nes_cm_node *cm_node) { struct iw_cm_id *cm_id = cm_node->cm_id; - switch (cm_node->state) { + enum nes_cm_node_state state = cm_node->state; + cm_node->state = NES_CM_STATE_CLOSED; + switch (state) { case NES_CM_STATE_SYN_RCVD: case NES_CM_STATE_CLOSING: rem_ref_cm_node(cm_node->cm_core, cm_node); @@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node) case NES_CM_STATE_FIN_WAIT1: if (cm_node->cm_id) cm_id->rem_ref(cm_id); - cm_node->state = NES_CM_STATE_CLOSED; send_reset(cm_node, NULL); break; default: @@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, break; case NES_CM_STATE_MPAREQ_RCVD: passive_state = atomic_add_return(1, &cm_node->passive_state); - if (passive_state == NES_SEND_RESET_EVENT) - create_event(cm_node, NES_CM_EVENT_RESET); - cm_node->state = NES_CM_STATE_CLOSED; dev_kfree_skb_any(skb); break; case NES_CM_STATE_ESTABLISHED: @@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, case NES_CM_STATE_CLOSED: drop_packet(skb); break; + case NES_CM_STATE_FIN_WAIT2: case NES_CM_STATE_FIN_WAIT1: case NES_CM_STATE_LAST_ACK: cm_node->cm_id->rem_ref(cm_node->cm_id); @@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) return -EINVAL; } + 
passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == NES_SEND_RESET_EVENT) { + rem_ref_cm_node(cm_node->cm_core, cm_node); + return -ECONNRESET; + } + /* associate the node with the QP */ nesqp->cm_node = (void *)cm_node; cm_node->nesqp = nesqp; @@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " "ret=%d\n", __func__, __LINE__, ret); - passive_state = atomic_add_return(1, &cm_node->passive_state); - if (passive_state == NES_SEND_RESET_EVENT) - create_event(cm_node, NES_CM_EVENT_RESET); return 0; } diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index f8233c851c69..1980a461c499 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, return; /* Ignore it, wait for close complete */ if (atomic_inc_return(&nesqp->close_timer_started) == 1) { + if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) && + (nesqp->ibqp_state == IB_QPS_RTS) && + ((nesadapter->eeprom_version >> 16) != NES_A0)) { + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; + nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); + nes_cm_disconn(nesqp); + } nesqp->cm_id->add_ref(nesqp->cm_id); schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, NES_TIMER_TYPE_CLOSE, 1, 0); @@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), async_event_id, nesqp->last_aeq, tcp_state); } - break; case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: if (nesqp->term_flags) { diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index aa9183db32b1..1204c3432b63 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -45,6 +45,7 @@ #define NES_PHY_TYPE_KR 9 #define NES_MULTICAST_PF_MAX 8 +#define NES_A0 3 enum pci_regs { NES_INT_STAT = 0x0000, diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 6dfdd49cdbcf..10560c796fd6 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; nes_write_indexed(nesdev, - NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); + NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); nesdev->disable_tx_flow_control = 0; } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; nes_write_indexed(nesdev, - NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); + NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp); nesdev->disable_tx_flow_control = 1; } if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { diff --git a/drivers/input/input.c b/drivers/input/input.c index a9b025f4147a..ab6982056518 100644 --- a/drivers/input/input.c +++ 
b/drivers/input/input.c @@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device); * @dev: input device supporting MT events and finger tracking * @num_slots: number of slots used by the device * - * This function allocates all necessary memory for MT slot handling - * in the input device, and adds ABS_MT_SLOT to the device capabilities. + * This function allocates all necessary memory for MT slot handling in the + * input device, and adds ABS_MT_SLOT to the device capabilities. All slots + * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1. */ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) { + int i; + if (!num_slots) return 0; @@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) dev->mtsize = num_slots; input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); + /* Mark slots as 'unused' */ + for (i = 0; i < num_slots; i++) + dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1; + return 0; } EXPORT_SYMBOL(input_mt_create_slots); diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index ea67c49146a3..b95231763911 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input, const struct bcm5974_config *cfg, const struct tp_finger *f) { - input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); - input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); - input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); - input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); + input_report_abs(input, ABS_MT_TOUCH_MAJOR, + raw2int(f->force_major) << 1); + input_report_abs(input, ABS_MT_TOUCH_MINOR, + raw2int(f->force_minor) << 1); + input_report_abs(input, ABS_MT_WIDTH_MAJOR, + raw2int(f->size_major) << 1); + input_report_abs(input, ABS_MT_WIDTH_MINOR, + raw2int(f->size_minor) << 1); input_report_abs(input, ABS_MT_ORIENTATION, MAX_FINGER_ORIENTATION - raw2int(f->orientation)); input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 46e4ba0b9246..f58513160480 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1485,8 +1485,8 @@ static int __init i8042_init(void) static void __exit i8042_exit(void) { - platform_driver_unregister(&i8042_driver); platform_device_unregister(i8042_platform_device); + platform_driver_unregister(&i8042_driver); i8042_platform_exit(); panic_blink = NULL; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 40d77ba8fdc1..6e29badb969e 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) if (features->type == WACOM_G4 || features->type == WACOM_MO) { input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); - rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); + rw = (data[7] & 0x04) - (data[7] & 0x03); } else { input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); - rw = -(signed)data[6]; + rw = -(signed char)data[6]; } input_report_rel(input, REL_WHEEL, rw); } diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index bd2755e8d9a3..f332c52968b7 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto err; } - err = mmc_sd_get_cid(host, host->ocr & ocr, 
card->raw_cid); - - if (!err) { + if (ocr & R4_MEMORY_PRESENT + && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { card->type = MMC_TYPE_SD_COMBO; if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 5f3a599ead07..87226cd202a5 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -66,6 +66,7 @@ #include <linux/clk.h> #include <linux/atmel_pdc.h> #include <linux/gfp.h> +#include <linux/highmem.h> #include <linux/mmc/host.h> diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index 9a68ff4353a2..5a950b16d9e6 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host) while (delay--) { reg = readw(host->base + MMC_REG_STATUS); - if (reg & STATUS_CARD_BUS_CLK_RUN) + if (reg & STATUS_CARD_BUS_CLK_RUN) { /* Check twice before cut */ reg = readw(host->base + MMC_REG_STATUS); if (reg & STATUS_CARD_BUS_CLK_RUN) return 0; + } if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) return 0; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 4a8776f8afdd..4526d2791f29 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev) int ret = 0; struct platform_device *pdev = to_platform_device(dev); struct omap_hsmmc_host *host = platform_get_drvdata(pdev); - pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */ if (host && host->suspended) return 0; @@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev) } } cancel_work_sync(&host->mmc_carddetect_work); - mmc_host_enable(host->mmc); ret = mmc_suspend_host(host->mmc); + mmc_host_enable(host->mmc); if (ret == 0) { omap_hsmmc_disable_irq(host); OMAP_HSMMC_WRITE(host->base, HCTL, diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2e16e0a90a5e..976330de379e 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) host->pio_active = XFER_NONE; #ifdef CONFIG_MMC_S3C_PIODMA - host->dodma = host->pdata->dma; + host->dodma = host->pdata->use_dma; #endif host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ee7d0a5a51c4..69d98e3bf6ab 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) { struct mmc_data *data = host->data; + void *sg_virt; unsigned short *buf; unsigned int count; unsigned long flags; @@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) return; } - buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + - host->sg_off); + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); + buf = (unsigned short *)(sg_virt + host->sg_off); count = host->sg_ptr->length - host->sg_off; if (count > data->blksz) @@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) host->sg_off += count; - tmio_mmc_kunmap_atomic(host, &flags); + tmio_mmc_kunmap_atomic(sg_virt, &flags); if (host->sg_off == host->sg_ptr->length) tmio_mmc_next_sg(host); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 64f7d5dfc106..0fedc78e3ea5 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ 
b/drivers/mmc/host/tmio_mmc.h @@ -82,10 +82,7 @@ #define ack_mmc_irqs(host, i) \ do { \ - u32 mask;\ - mask = sd_ctrl_read32((host), CTL_STATUS); \ - mask &= ~((i) & TMIO_MASK_IRQ); \ - sd_ctrl_write32((host), CTL_STATUS, mask); \ + sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ } while (0) @@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) return --host->sg_len; } -static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, +static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) { - struct scatterlist *sg = host->sg_ptr; - local_irq_save(*flags); return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; } -static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, +static inline void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags) { - kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); + kunmap_atomic(virt, KM_BIO_SRC_IRQ); local_irq_restore(*flags); } diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index a045559c81cf..85671adae455 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -1994,10 +1994,9 @@ vortex_error(struct net_device *dev, int status) } } - if (status & RxEarly) { /* Rx early is unused. */ - vortex_rx(dev); + if (status & RxEarly) /* Rx early is unused. */ iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); - } + if (status & StatsFull) { /* Empty statistics. */ static int DoneDidThat; if (vortex_debug > 4) @@ -2298,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id) if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { if (status == 0xffff) break; + if (status & RxEarly) + vortex_rx(dev); + spin_unlock(&vp->window_lock); vortex_error(dev, status); + spin_lock(&vp->window_lock); + window_set(vp, 7); } if (--work_done < 0) { diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 37617abc1647..1e620e287ae0 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget) b44_tx(bp); /* spin_unlock(&bp->tx_lock); */ } + if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ + bp->istat &= ~ISTAT_RFO; + b44_disable_ints(bp); + ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + } + spin_unlock_irqrestore(&bp->lock, flags); work_done = 0; diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 99197bd54da5..53306bf3f401 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -181,6 +181,7 @@ struct be_drvr_stats { u64 be_rx_bytes_prev; u64 be_rx_pkts; u32 be_rx_rate; + u32 be_rx_mcast_pkt; /* number of non ether type II frames dropped where * frame len > length field of Mac Hdr */ u32 be_802_3_dropped_frames; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 3d305494a606..34abcc9403d6 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status) while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ - BUG_ON(!is_link_state_evt(compl->flags)); - - /* Interpret compl as a async link evt */ - be_async_link_state_process(adapter, + if (is_link_state_evt(compl->flags)) + be_async_link_state_process(adapter, (struct be_async_event_link_state *) compl); } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { *status = be_mcc_compl_process(adapter, compl); @@ -207,7 
+205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) if (msecs > 4000) { dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); - be_dump_ue(adapter); + be_detect_dump_ue(adapter); return -1; } diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index bdc10a28cfda..ad1e6fac60c5 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, extern int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd); extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); -extern void be_dump_ue(struct be_adapter *adapter); +extern void be_detect_dump_ue(struct be_adapter *adapter); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index cd16243c7c36..13f0abbc5205 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(be_rx_events)}, {DRVSTAT_INFO(be_tx_compl)}, {DRVSTAT_INFO(be_rx_compl)}, + {DRVSTAT_INFO(be_rx_mcast_pkt)}, {DRVSTAT_INFO(be_ethrx_post_fail)}, {DRVSTAT_INFO(be_802_3_dropped_frames)}, {DRVSTAT_INFO(be_802_3_malformed_frames)}, diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 5d38046402b2..a2ec5df0d733 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h @@ -167,8 +167,11 @@ #define FLASH_FCoE_BIOS_START_g3 (13631488) #define FLASH_REDBOOT_START_g3 (262144) - - +/************* Rx Packet Type Encoding **************/ +#define BE_UNICAST_PACKET 0 +#define BE_MULTICAST_PACKET 1 +#define BE_BROADCAST_PACKET 2 +#define BE_RSVD_PACKET 3 /* * BE descriptors: host memory data structures whose formats diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 74e146f470c6..6eda7a022256 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter) dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; + dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt; /* bad pkts received */ dev_stats->rx_errors = port_stats->rx_crc_errors + @@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter) /* no space available in linux */ dev_stats->tx_dropped = 0; - dev_stats->multicast = port_stats->rx_multicast_frames; dev_stats->collisions = 0; /* detailed tx_errors */ @@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter) } static void be_rx_stats_update(struct be_adapter *adapter, - u32 pktsize, u16 numfrags) + u32 pktsize, u16 numfrags, u8 pkt_type) { struct be_drvr_stats *stats = drvr_stats(adapter); @@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter, stats->be_rx_frags += numfrags; stats->be_rx_bytes += pktsize; stats->be_rx_pkts++; + + if (pkt_type == BE_MULTICAST_PACKET) + stats->be_rx_mcast_pkt++; } static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) @@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter, u16 rxq_idx, i, j; u32 pktsize, hdr_len, curr_frag_len, size; u8 *start; + u8 pkt_type; rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); + pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); page_info = 
get_rx_page_info(adapter, rxq_idx); @@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, BUG_ON(j > MAX_SKB_FRAGS); done: - be_rx_stats_update(adapter, pktsize, num_rcvd); + be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type); } /* Process the RX completion indicated by rxcp when GRO is disabled */ @@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; u16 i, rxq_idx = 0, vid, j; u8 vtm; + u8 pkt_type; num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); /* Is it a flush compl that has no data */ @@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); + pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); /* vlanf could be wrongly set in some cards. * ignore if vtm is not set */ @@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); } - be_rx_stats_update(adapter, pkt_size, num_rcvd); + be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type); } static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) @@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget) return 1; } -static inline bool be_detect_ue(struct be_adapter *adapter) -{ - u32 online0 = 0, online1 = 0; - - pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0); - - pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1); - - if (!online0 || !online1) { - adapter->ue_detected = true; - dev_err(&adapter->pdev->dev, - "UE Detected!! 
online0=%d online1=%d\n", - online0, online1); - return true; - } - - return false; -} - -void be_dump_ue(struct be_adapter *adapter) +void be_detect_dump_ue(struct be_adapter *adapter) { u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; u32 i; @@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter) ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); + if (ue_status_lo || ue_status_hi) { + adapter->ue_detected = true; + dev_err(&adapter->pdev->dev, "UE Detected!!\n"); + } + if (ue_status_lo) { for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { if (ue_status_lo & 1) @@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work) adapter->rx_post_starved = false; be_post_rx_frags(adapter); } - if (!adapter->ue_detected) { - if (be_detect_ue(adapter)) - be_dump_ue(adapter); - } + if (!adapter->ue_detected) + be_detect_dump_ue(adapter); schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2cc4cfc31892..3b16f62d5606 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * so it can wait */ bond_for_each_slave(bond, slave, i) { + unsigned long trans_start = dev_trans_start(slave->dev); + if (slave->link != BOND_LINK_UP) { - if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && - time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { + if (time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + delta_in_ticks) && + time_in_range(jiffies, + slave->dev->last_rx - delta_in_ticks, + slave->dev->last_rx + delta_in_ticks)) { slave->link = BOND_LINK_UP; slave->state = BOND_STATE_ACTIVE; @@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || - (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { + if (!time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + 2 * delta_in_ticks) || + !time_in_range(jiffies, + slave->dev->last_rx - delta_in_ticks, + slave->dev->last_rx + 2 * delta_in_ticks)) { slave->link = BOND_LINK_DOWN; slave->state = BOND_STATE_BACKUP; @@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) { struct slave *slave; int i, commit = 0; + unsigned long trans_start; bond_for_each_slave(bond, slave, i) { slave->new_link = BOND_LINK_NOCHANGE; if (slave->link != BOND_LINK_UP) { - if (time_before_eq(jiffies, slave_last_rx(bond, slave) + - delta_in_ticks)) { + if (time_in_range(jiffies, + slave_last_rx(bond, slave) - delta_in_ticks, + slave_last_rx(bond, slave) + delta_in_ticks)) { + slave->new_link = BOND_LINK_UP; commit++; } @@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * active. This avoids bouncing, as the last receive * times need a full ARP monitor cycle to be updated. 
*/ - if (!time_after_eq(jiffies, slave->jiffies + - 2 * delta_in_ticks)) + if (time_in_range(jiffies, + slave->jiffies - delta_in_ticks, + slave->jiffies + 2 * delta_in_ticks)) continue; /* @@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ if (slave->state == BOND_STATE_BACKUP && !bond->current_arp_slave && - time_after(jiffies, slave_last_rx(bond, slave) + - 3 * delta_in_ticks)) { + !time_in_range(jiffies, + slave_last_rx(bond, slave) - delta_in_ticks, + slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { + slave->new_link = BOND_LINK_DOWN; commit++; } @@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * - (more than 2*delta since receive AND * the bond has an IP address) */ + trans_start = dev_trans_start(slave->dev); if ((slave->state == BOND_STATE_ACTIVE) && - (time_after_eq(jiffies, dev_trans_start(slave->dev) + - 2 * delta_in_ticks) || - (time_after_eq(jiffies, slave_last_rx(bond, slave) - + 2 * delta_in_ticks)))) { + (!time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + 2 * delta_in_ticks) || + !time_in_range(jiffies, + slave_last_rx(bond, slave) - delta_in_ticks, + slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { + slave->new_link = BOND_LINK_DOWN; commit++; } @@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) { struct slave *slave; int i; + unsigned long trans_start; bond_for_each_slave(bond, slave, i) { switch (slave->new_link) { @@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) continue; case BOND_LINK_UP: + trans_start = dev_trans_start(slave->dev); if ((!bond->curr_active_slave && - time_before_eq(jiffies, - dev_trans_start(slave->dev) + - delta_in_ticks)) || + time_in_range(jiffies, + trans_start - delta_in_ticks, + trans_start + delta_in_ticks)) || bond->curr_active_slave != slave) { slave->link = BOND_LINK_UP; bond->current_arp_slave = NULL; diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index b4fb07a6f13f..51919fcd50c2 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); - if (rxlen > 0) { - skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); - if (!skb) { - /* todo - dump frame and move on */ - } + if (rxlen > 4) { + unsigned int rxalign; + + rxlen -= 4; + rxalign = ALIGN(rxlen, 4); + skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign); + if (skb) { - /* two bytes to ensure ip is aligned, and four bytes - * for the status header and 4 bytes of garbage */ - skb_reserve(skb, 2 + 4 + 4); + /* 4 bytes of status header + 4 bytes of + * garbage: we put them before ethernet + * header, so that they are copied, + * but ignored. 
+ */ - rxpkt = skb_put(skb, rxlen - 4) - 8; + rxpkt = skb_put(skb, rxlen) - 8; - /* align the packet length to 4 bytes, and add 4 bytes - * as we're getting the rx status header as well */ - ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8); + ks8851_rdfifo(ks, rxpkt, rxalign + 8); - if (netif_msg_pktdata(ks)) - ks8851_dbg_dumpkkt(ks, rxpkt); + if (netif_msg_pktdata(ks)) + ks8851_dbg_dumpkkt(ks, rxpkt); - skb->protocol = eth_type_trans(skb, ks->netdev); - netif_rx(skb); + skb->protocol = eth_type_trans(skb, ks->netdev); + netif_rx(skb); - ks->netdev->stats.rx_packets++; - ks->netdev->stats.rx_bytes += rxlen - 4; + ks->netdev->stats.rx_packets++; + ks->netdev->stats.rx_bytes += rxlen; + } } ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); diff --git a/drivers/net/niu.c b/drivers/net/niu.c index bc695d53cdcc..fe6983af6918 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np, struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; int i, idx, cnt; - u16 n_entries; unsigned long flags; - + int ret = 0; /* put the tcam size here */ nfc->data = tcam_get_size(np); niu_lock_parent(np, flags); - n_entries = nfc->rule_cnt; for (cnt = 0, i = 0; i < nfc->data; i++) { idx = tcam_get_index(np, i); tp = &parent->tcam[idx]; if (!tp->valid) continue; + if (cnt == nfc->rule_cnt) { + ret = -EMSGSIZE; + break; + } rule_locs[cnt] = i; cnt++; } niu_unlock_parent(np, flags); - if (n_entries != cnt) { - /* print warning, this should not happen */ - netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n", - np->parent->index, __func__, n_entries, cnt); - } - - return 0; + return ret; } static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index bbb7951b9c4c..ea0461eb2dbe 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev) if (!netif_running(dev)) return 0; - spin_lock(&priv->lock); - if (priv->shutdown) { /* Re-open the interface and re-init the MAC/DMA - and the rings. */ + and the rings (i.e. on hibernation stage) */ stmmac_open(dev); - goto out_resume; + return 0; } + spin_lock(&priv->lock); + /* Power Down bit, into the PM register, is cleared * automatically as soon as a magic packet or a Wake-up frame * is received. 
Anyway, it's better to manually clear @@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev) netif_start_queue(dev); -out_resume: spin_unlock(&priv->lock); return 0; } diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 8ed30fa35d0a..b2bcf99e6f08 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = { .ndo_get_stats = &ipheth_stats, }; -static struct device_type ipheth_type = { - .name = "wwan", -}; - static int ipheth_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf, netdev->netdev_ops = &ipheth_netdev_ops; netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; - strcpy(netdev->name, "wwan%d"); + strcpy(netdev->name, "eth%d"); dev = netdev_priv(netdev); dev->udev = udev; @@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf, SET_NETDEV_DEV(netdev, &intf->dev); SET_ETHTOOL_OPS(netdev, &ops); - SET_NETDEV_DEVTYPE(netdev, &ipheth_type); retval = register_netdev(netdev); if (retval) { diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fd69095ef6e3..f53412368ce1 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | - NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG; + NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; ret = register_netdev(dev); if (ret < 0) diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index a9352b2c7ac4..b7e755f4178a 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = { .notifier_call = module_load_notify, }; - -static void end_sync(void) -{ - end_cpu_work(); - /* make sure we don't leak task structs */ - process_task_mortuary(); - process_task_mortuary(); -} - - int sync_start(void) { int err; @@ -158,7 +148,7 @@ int sync_start(void) if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) return -ENOMEM; - start_cpu_work(); + mutex_lock(&buffer_mutex); err = task_handoff_register(&task_free_nb); if (err) @@ -173,7 +163,10 @@ int sync_start(void) if (err) goto out4; + start_cpu_work(); + out: + mutex_unlock(&buffer_mutex); return err; out4: profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); @@ -182,7 +175,6 @@ out3: out2: task_handoff_unregister(&task_free_nb); out1: - end_sync(); free_cpumask_var(marked_cpus); goto out; } @@ -190,11 +182,20 @@ out1: void sync_stop(void) { + /* flush buffers */ + mutex_lock(&buffer_mutex); + end_cpu_work(); unregister_module_notifier(&module_load_nb); profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); task_handoff_unregister(&task_free_nb); - end_sync(); + mutex_unlock(&buffer_mutex); + flush_scheduled_work(); + + /* make sure we don't leak task structs */ + process_task_mortuary(); + process_task_mortuary(); + free_cpumask_var(marked_cpus); } diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 219f79e2210a..f179ac2ea801 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -120,8 +120,6 @@ void end_cpu_work(void) cancel_delayed_work(&b->work); } - - flush_scheduled_work(); } /* diff --git a/drivers/rtc/rtc-bfin.c 
b/drivers/rtc/rtc-bfin.c index 72b2bcc2c224..d4fb82d85e9b 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c @@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state) enable_irq_wake(IRQ_RTC); bfin_rtc_sync_pending(&pdev->dev); } else - bfin_rtc_int_clear(-1); + bfin_rtc_int_clear(0); return 0; } @@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev) { if (device_may_wakeup(&pdev->dev)) disable_irq_wake(IRQ_RTC); - else - bfin_write_RTC_ISTAT(-1); + + /* + * Since only some of the RTC bits are maintained externally in the + * Vbat domain, we need to wait for the RTC MMRs to be synced into + * the core after waking up. This happens every RTC 1HZ. Once that + * has happened, we can go ahead and re-enable the important write + * complete interrupt event. + */ + while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC)) + continue; + bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE); return 0; } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 66377f3e28b8..d60557cae8ef 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t) t->time.tm_isdst = -1; t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); - return rtc_valid_tm(t); + return 0; } static struct rtc_class_ops m41t80_rtc_ops = { diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 6c418fe7f288..b7a6690e5b35 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id) } if (request_irq(adev->irq[0], pl031_interrupt, - IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { + IRQF_DISABLED, "rtc-pl031", ldata)) { ret = -EIO; goto out_no_irq; } diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index b7de02525ec9..85cf607fc78f 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device) if (!blkdat->request_queue) return -ENOMEM; - elevator_exit(blkdat->request_queue->elevator); - rc = elevator_init(blkdat->request_queue, "noop"); + rc = elevator_change(blkdat->request_queue, "noop"); if (rc) goto cleanup_queue; diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 7d4d2275573c..7f11f3e48e12 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); - int len = 0; - int status; + int status = 0; SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); switch (param) { @@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, default: return iscsi_host_get_param(shost, param, buf); } - return len; + return status; } int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 26350e470bcc..877324fc594c 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, memset(req, 0, sizeof(*req)); wrb->tag0 |= tag; - be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); + be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); 
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, sizeof(*req)); diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index cd05e049d5f6..d0c82340f0e2 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd) { struct scsi_sense_hdr sshdr; - scmd_printk(KERN_INFO, cmd, ""); + scmd_printk(KERN_INFO, cmd, " "); scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sshdr); scsi_show_sense_hdr(&sshdr); scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sshdr); - scmd_printk(KERN_INFO, cmd, ""); + scmd_printk(KERN_INFO, cmd, " "); scsi_show_extd_sense(sshdr.asc, sshdr.ascq); } EXPORT_SYMBOL(scsi_print_sense); @@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result); void scsi_print_result(struct scsi_cmnd *cmd) { - scmd_printk(KERN_INFO, cmd, ""); + scmd_printk(KERN_INFO, cmd, " "); scsi_show_result(cmd->result); } EXPORT_SYMBOL(scsi_print_result); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f5551b5fe53..c5d0606ad097 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + /* The doorbell reset seems to cause lockups on some Smart + * Arrays (e.g. P410, P410i, maybe others). Until this is + * fixed or at least isolated, avoid the doorbell reset. + */ + use_doorbell = 0; + rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index fda4de3440c4..e88bbdde49c5 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); + WARN_ON(bio->bi_rw & REQ_WRITE); or->in.bio = bio; or->in.total_bytes = len; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 420238cc794e..114bc5a81171 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) qla24xx_disable_vp(vha); + vha->flags.delete_progress = 1; + fc_remove_host(vha->host); scsi_remove_host(vha->host); - qla2x00_free_fcports(vha); + if (vha->timer_active) { + qla2x00_vp_stop_timer(vha); + DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" + " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); + } qla24xx_deallocate_vp_id(vha); + /* No pending activities shall be there on the vha now */ + DEBUG(msleep(random32()%10)); /* Just to see if something falls on + * the net we have placed below */ + + BUG_ON(atomic_read(&vha->vref_count)); + + qla2x00_free_fcports(vha); + mutex_lock(&ha->vport_lock); ha->cur_vport_count--; clear_bit(vha->vp_idx, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); - if (vha->timer_active) { - qla2x00_vp_stop_timer(vha); - DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p " - "has stopped\n", - vha->host_no, vha->vp_idx, vha)); - } - if (vha->req->id && !ha->flags.cpu_affinity_enabled) { if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, diff --git a/drivers/scsi/qla2xxx/qla_dbg.h 
b/drivers/scsi/qla2xxx/qla_dbg.h index 6cfc28a25eb3..b74e6b5743dc 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -29,8 +29,6 @@ /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ -/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */ - /* * Macros use for debugging the driver. */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3a432ea0c7a3..d2a4e1530708 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2641,6 +2641,7 @@ struct qla_hw_data { #define MBX_UPDATE_FLASH_ACTIVE 3 struct mutex vport_lock; /* Virtual port synchronization */ + spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */ struct completion mbx_cmd_comp; /* Serialize mbx access */ struct completion mbx_intr_comp; /* Used for completion notification */ struct completion dcbx_comp; /* For set port config notification */ @@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host { uint32_t management_server_logged_in :1; uint32_t process_response_queue :1; uint32_t difdix_supported:1; + uint32_t delete_progress:1; } flags; atomic_t loop_state; @@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host { struct req_que *req; int fw_heartbeat_counter; int seconds_since_last_heartbeat; + + atomic_t vref_count; } scsi_qla_host_t; /* @@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host { test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ atomic_read(&ha->loop_state) == LOOP_DOWN) +#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ + atomic_inc(&__vha->vref_count); \ + mb(); \ + if (__vha->flags.delete_progress) { \ + atomic_dec(&__vha->vref_count); \ + __bail = 1; \ + } else { \ + __bail = 0; \ + } \ +} while (0) + +#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ + atomic_dec(&__vha->vref_count); \ +} while (0) + + #define qla_printk(level, ha, format, arg...) \ dev_printk(level , &((ha)->pdev->dev) , format , ## arg) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d863ed2619b5..9c383baebe27 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp) { struct srb_ctx *ctx = sp->ctx; struct srb_iocb *iocb = ctx->u.iocb_cmd; + struct scsi_qla_host *vha = sp->fcport->vha; del_timer_sync(&iocb->timer); kfree(iocb); kfree(ctx); mempool_free(sp, sp->fcport->vha->hw->srb_mempool); + + QLA_VHA_MARK_NOT_BUSY(vha); } inline srb_t * qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, unsigned long tmo) { - srb_t *sp; + srb_t *sp = NULL; struct qla_hw_data *ha = vha->hw; struct srb_ctx *ctx; struct srb_iocb *iocb; + uint8_t bail; + + QLA_VHA_MARK_BUSY(vha, bail); + if (bail) + return NULL; sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); if (!sp) @@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, iocb->timer.function = qla2x00_ctx_sp_timeout; add_timer(&iocb->timer); done: + if (!sp) + QLA_VHA_MARK_NOT_BUSY(vha); return sp; } @@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha) qla2x00_init_response_q_entries(rsp); } + spin_lock_irqsave(&ha->vport_slock, flags); /* Clear RSCN queue. 
*/ list_for_each_entry(vp, &ha->vp_list, list) { vp->rscn_in_ptr = 0; vp->rscn_out_ptr = 0; } + + spin_unlock_irqrestore(&ha->vport_slock, flags); + ha->isp_ops->config_rings(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, /* Bypass virtual ports of the same host. */ found = 0; if (ha->num_vhosts) { + unsigned long flags; + + spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (new_fcport->d_id.b24 == vp->d_id.b24) { found = 1; break; } } + spin_unlock_irqrestore(&ha->vport_slock, flags); + if (found) continue; } @@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; struct scsi_qla_host *tvp; + unsigned long flags = 0; rval = QLA_SUCCESS; @@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) /* Check for loop ID being already in use. */ found = 0; fcport = NULL; + + spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { list_for_each_entry(fcport, &vp->vp_fcports, list) { if (fcport->loop_id == dev->loop_id && @@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) if (found) break; } + spin_unlock_irqrestore(&ha->vport_slock, flags); /* If not in use then it is free to use. */ if (!found) { @@ -3791,14 +3814,27 @@ void qla2x00_update_fcports(scsi_qla_host_t *base_vha) { fc_port_t *fcport; - struct scsi_qla_host *tvp, *vha; + struct scsi_qla_host *vha; + struct qla_hw_data *ha = base_vha->hw; + unsigned long flags; + spin_lock_irqsave(&ha->vport_slock, flags); /* Go with deferred removal of rport references. */ - list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) - list_for_each_entry(fcport, &vha->vp_fcports, list) + list_for_each_entry(vha, &base_vha->hw->vp_list, list) { + atomic_inc(&vha->vref_count); + list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport && fcport->drport && - atomic_read(&fcport->state) != FCS_UNCONFIGURED) + atomic_read(&fcport->state) != FCS_UNCONFIGURED) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_rport_del(fcport); + + spin_lock_irqsave(&ha->vport_slock, flags); + } + } + atomic_dec(&vha->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); } void @@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); - struct scsi_qla_host *tvp; + unsigned long flags; vha->flags.online = 0; ha->flags.chip_reset_done = 0; @@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha, 0); - list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &base_vha->hw->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_mark_all_devices_lost(vp, 0); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, @@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) uint8_t status = 0; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; - struct scsi_qla_host 
*tvp; struct req_que *req = ha->req_q_map[0]; + unsigned long flags; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); @@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) DEBUG(printk(KERN_INFO "qla2x00_abort_isp(%ld): succeeded.\n", vha->host_no)); - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { - if (vp->vp_idx) + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_vp_abort_isp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else { qla_printk(KERN_INFO, ha, "qla2x00_abort_isp: **** FAILED ****\n"); @@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; struct scsi_qla_host *vp; - struct scsi_qla_host *tvp; + unsigned long flags; status = qla2x00_init_rings(vha); if (!status) { @@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) DEBUG(printk(KERN_INFO "qla82xx_restart_isp(%ld): succeeded.\n", vha->host_no)); - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { - if (vp->vp_idx) + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_vp_abort_isp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); + } else { qla_printk(KERN_INFO, ha, "qla82xx_restart_isp: **** FAILED ****\n"); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6982ba70e12a..28f65be19dad 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->result = DID_ERROR << 16; break; } - } else if (!lscsi_status) { + } else { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " "of 0x%x bytes).\n", vha->host_no, cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); - cp->result = DID_ERROR << 16; - break; + cp->result = DID_ERROR << 16 | lscsi_status; + goto check_scsi_status; } cp->result = DID_OK << 16 | lscsi_status; logit = 0; +check_scsi_status: /* * Check to see if SCSI Status is non zero. If so report SCSI * Status. 
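
The qla2xxx hunks above keep applying one idiom: take ha->vport_slock, pin the current vport with atomic_inc(&vp->vref_count), drop the lock around the blocking call (qla2x00_vp_abort_isp(), qla2x00_rport_del(), ...), then retake the lock and drop the reference before advancing the iterator. A minimal sketch of that pattern follows; the demo_* names are invented for illustration, the driver itself uses scsi_qla_host_t, ha->vp_list and ha->vport_slock.

    /*
     * Illustrative sketch only: the reference-counted list walk the
     * qla2xxx hunks converge on.  demo_* names are invented here.
     */
    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_vport {
        struct list_head list;
        atomic_t vref_count;        /* nonzero while a walker is using us */
    };

    static LIST_HEAD(demo_vp_list);
    static DEFINE_SPINLOCK(demo_vp_slock);

    /* Run a possibly-sleeping helper on every vport without holding the lock. */
    static void demo_for_each_vport(void (*fn)(struct demo_vport *))
    {
        struct demo_vport *vp;
        unsigned long flags;

        spin_lock_irqsave(&demo_vp_slock, flags);
        list_for_each_entry(vp, &demo_vp_list, list) {
            atomic_inc(&vp->vref_count);    /* pin: deletion must wait for us */
            spin_unlock_irqrestore(&demo_vp_slock, flags);

            fn(vp);                         /* may sleep; lock is dropped */

            spin_lock_irqsave(&demo_vp_slock, flags);
            atomic_dec(&vp->vref_count);    /* vp->list.next valid again here */
        }
        spin_unlock_irqrestore(&demo_vp_slock, flags);
    }

The deletion side (qla24xx_deallocate_vp_id above) takes the same lock and sleeps in a loop until vref_count reaches zero before list_del(), so a pinned entry can never disappear mid-walk; QLA_VHA_MARK_BUSY adds the flags.delete_progress check so no new references are handed out once teardown has begun.
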
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6009b0c69488..a595ec8264f8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; - scsi_qla_host_t *tvp; + unsigned long flags; if (rptid_entry->entry_status != 0) return; @@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, return; } - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) if (vp_idx == vp->vp_idx) break; + spin_unlock_irqrestore(&ha->vport_slock, flags); + if (!vp) return; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 987c5b0ca78e..2b69392a71a1 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) { uint32_t vp_id; struct qla_hw_data *ha = vha->hw; + unsigned long flags; /* Find an empty slot and assign an vp_id */ mutex_lock(&ha->vport_lock); @@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) set_bit(vp_id, ha->vp_idx_map); ha->num_vhosts++; vha->vp_idx = vp_id; + + spin_lock_irqsave(&ha->vport_slock, flags); list_add_tail(&vha->list, &ha->vp_list); + spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); return vp_id; } @@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) { uint16_t vp_id; struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; mutex_lock(&ha->vport_lock); + /* + * Wait for all pending activities to finish before removing vport from + * the list. + * Lock needs to be held for safe removal from the list (it + * ensures no active vp_list traversal while the vport is removed + * from the queue) + */ + spin_lock_irqsave(&ha->vport_slock, flags); + while (atomic_read(&vha->vref_count)) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + + msleep(500); + + spin_lock_irqsave(&ha->vport_slock, flags); + } + list_del(&vha->list); + spin_unlock_irqrestore(&ha->vport_slock, flags); + vp_id = vha->vp_idx; ha->num_vhosts--; clear_bit(vp_id, ha->vp_idx_map); - list_del(&vha->list); + mutex_unlock(&ha->vport_lock); } @@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) { scsi_qla_host_t *vha; struct scsi_qla_host *tvha; + unsigned long flags; + spin_lock_irqsave(&ha->vport_slock, flags); /* Locate matching device in database. */ list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { - if (!memcmp(port_name, vha->port_name, WWN_SIZE)) + if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { + spin_unlock_irqrestore(&ha->vport_slock, flags); return vha; + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); return NULL; } @@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) { + /* + * !!! NOTE !!! + * This function, if called in contexts other than vp create, disable + * or delete, please make sure this is synchronized with the + * delete thread. 
+ */ fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { @@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) "loop_id=0x%04x :%x\n", vha->host_no, fcport->loop_id, fcport->vp_idx)); - atomic_set(&fcport->state, FCS_DEVICE_DEAD); qla2x00_mark_device_lost(vha, fcport, 0, 0); atomic_set(&fcport->state, FCS_UNCONFIGURED); } @@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) void qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) { - scsi_qla_host_t *vha, *tvha; + scsi_qla_host_t *vha; struct qla_hw_data *ha = rsp->hw; int i = 0; + unsigned long flags; - list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vha, &ha->vp_list, list) { if (vha->vp_idx) { + atomic_inc(&vha->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + switch (mb[0]) { case MBA_LIP_OCCURRED: case MBA_LOOP_UP: @@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) qla2x00_async_event(vha, rsp, mb); break; } + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vha->vref_count); } i++; } + spin_unlock_irqrestore(&ha->vport_slock, flags); } int @@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; - struct scsi_qla_host *tvp; + unsigned long flags = 0; if (vha->vp_idx) return; @@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) if (!(ha->current_topology & ISP_CFG_F)) return; - list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { - if (vp->vp_idx) + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + if (vp->vp_idx) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + ret = qla2x00_do_dpc_vp(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } } + spin_unlock_irqrestore(&ha->vport_slock, flags); } int diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 915b77a6e193..0a71cc71eab2 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp) sufficient_dsds: req_cnt = 1; + if (req->cnt < (req_cnt + 2)) { + cnt = (uint16_t)RD_REG_DWORD_RELAXED( + ®->req_q_out[0]); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + } + + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); if (!sp->ctx) { DEBUG(printk(KERN_INFO @@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } qla2xxx_wake_dpc(vha); + ha->flags.fw_hung = 1; if (ha->flags.mbox_busy) { - ha->flags.fw_hung = 1; ha->flags.mbox_int = 1; DEBUG2(qla_printk(KERN_ERR, ha, - "Due to fw hung, doing premature " - "completion of mbx command\n")); - complete(&ha->mbx_intr_comp); + "Due to fw hung, doing premature " + "completion of mbx command\n")); + if (test_bit(MBX_INTR_WAIT, + &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); } } - } + } else + vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; } @@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) "%s(): Adapter reset needed!\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + ha->flags.fw_hung = 1; if (ha->flags.mbox_busy) { - ha->flags.fw_hung = 1; ha->flags.mbox_int = 1; 
DEBUG2(qla_printk(KERN_ERR, ha, - "Need reset, doing premature " - "completion of mbx command\n")); - complete(&ha->mbx_intr_comp); + "Need reset, doing premature " + "completion of mbx command\n")); + if (test_bit(MBX_INTR_WAIT, + &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); } } else { qla82xx_check_fw_alive(vha); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8c80b49ac1c4..1e4bff695254 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2341,16 +2341,28 @@ probe_out: static void qla2x00_remove_one(struct pci_dev *pdev) { - scsi_qla_host_t *base_vha, *vha, *temp; + scsi_qla_host_t *base_vha, *vha; struct qla_hw_data *ha; + unsigned long flags; base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; - list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { - if (vha && vha->fc_vport) + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vha, &ha->vp_list, list) { + atomic_inc(&vha->vref_count); + + if (vha && vha->fc_vport) { + spin_unlock_irqrestore(&ha->vport_slock, flags); + fc_vport_terminate(vha->fc_vport); + + spin_lock_irqsave(&ha->vport_slock, flags); + } + + atomic_dec(&vha->vref_count); } + spin_unlock_irqrestore(&ha->vport_slock, flags); set_bit(UNLOADING, &base_vha->dpc_flags); @@ -2975,10 +2987,17 @@ static struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; + uint8_t bail; + + QLA_VHA_MARK_BUSY(vha, bail); + if (bail) + return NULL; e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); - if (!e) + if (!e) { + QLA_VHA_MARK_NOT_BUSY(vha); return NULL; + } INIT_LIST_HEAD(&e->list); e->type = type; @@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); + + /* For each work completed decrement vha ref count */ + QLA_VHA_MARK_NOT_BUSY(vha); } } diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index e75ccb91317d..8edbccb3232d 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.03-k0" +#define QLA2XXX_VERSION "8.03.04-k0" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 3 +#define QLA_DRIVER_PATCH_VER 4 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9ade720422c6..ee02d3838a0a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) err_exit: scsi_release_buffers(cmd); - scsi_put_command(cmd); cmd->request->special = NULL; + scsi_put_command(cmd); return error; } EXPORT_SYMBOL(scsi_init_io); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2714becc2eaf..ffa0689ee840 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode) SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); - if (atomic_dec_return(&sdkp->openers) && sdev->removable) { + if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) { if (scsi_block_when_processing_errors(sdev)) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); } @@ -2625,15 +2625,15 @@ module_exit(exit_sd); static void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { - sd_printk(KERN_INFO, sdkp, ""); + sd_printk(KERN_INFO, sdkp, " "); scsi_show_sense_hdr(sshdr); - sd_printk(KERN_INFO, sdkp, ""); + 
sd_printk(KERN_INFO, sdkp, " "); scsi_show_extd_sense(sshdr->asc, sshdr->ascq); } static void sd_print_result(struct scsi_disk *sdkp, int result) { - sd_printk(KERN_INFO, sdkp, ""); + sd_printk(KERN_INFO, sdkp, " "); scsi_show_result(result); } diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index a7bc8b7b09ac..2c3e89ddf069 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n) static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) { - if (label) - sym_print_addr(cp->cmd, "%s: ", label); - else - sym_print_addr(cp->cmd, ""); + sym_print_addr(cp->cmd, "%s: ", label); spi_print_msg(msg); printf("\n"); @@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np) switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) - sym_print_msg(cp, NULL, np->msgin); + sym_print_msg(cp, "extended msg ", + np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, tp, cp, tmp); @@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np) */ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) - sym_print_msg(cp, NULL, np->msgin); + sym_print_msg(cp, "1 or 2 byte ", np->msgin); if (cp->host_flags & HF_SENSE) OUTL_DSP(np, SCRIPTA_BA(np, clrack)); else diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 4b99117f3ecd..c579dcc9200c 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, return 0; } +static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) +{ + INIT_LIST_HEAD(&work->node); + work->fn = fn; + init_waitqueue_head(&work->done); + work->flushing = 0; + work->queue_seq = work->done_seq = 0; +} + /* Init poll structure */ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, unsigned long mask, struct vhost_dev *dev) { - struct vhost_work *work = &poll->work; - init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); init_poll_funcptr(&poll->table, vhost_poll_func); poll->mask = mask; poll->dev = dev; - INIT_LIST_HEAD(&work->node); - work->fn = fn; - init_waitqueue_head(&work->done); - work->flushing = 0; - work->queue_seq = work->done_seq = 0; + vhost_work_init(&poll->work, fn); } /* Start polling a file. We add ourselves to file's wait queue. The caller must @@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll) remove_wait_queue(poll->wqh, &poll->wait); } -/* Flush any work that has been scheduled. When calling this, don't hold any - * locks that are also used by the callback. 
*/ -void vhost_poll_flush(struct vhost_poll *poll) +static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) { - struct vhost_work *work = &poll->work; unsigned seq; int left; int flushing; - spin_lock_irq(&poll->dev->work_lock); + spin_lock_irq(&dev->work_lock); seq = work->queue_seq; work->flushing++; - spin_unlock_irq(&poll->dev->work_lock); + spin_unlock_irq(&dev->work_lock); wait_event(work->done, ({ - spin_lock_irq(&poll->dev->work_lock); + spin_lock_irq(&dev->work_lock); left = seq - work->done_seq <= 0; - spin_unlock_irq(&poll->dev->work_lock); + spin_unlock_irq(&dev->work_lock); left; })); - spin_lock_irq(&poll->dev->work_lock); + spin_lock_irq(&dev->work_lock); flushing = --work->flushing; - spin_unlock_irq(&poll->dev->work_lock); + spin_unlock_irq(&dev->work_lock); BUG_ON(flushing < 0); } -void vhost_poll_queue(struct vhost_poll *poll) +/* Flush any work that has been scheduled. When calling this, don't hold any + * locks that are also used by the callback. */ +void vhost_poll_flush(struct vhost_poll *poll) +{ + vhost_work_flush(poll->dev, &poll->work); +} + +static inline void vhost_work_queue(struct vhost_dev *dev, + struct vhost_work *work) { - struct vhost_dev *dev = poll->dev; - struct vhost_work *work = &poll->work; unsigned long flags; spin_lock_irqsave(&dev->work_lock, flags); @@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll) spin_unlock_irqrestore(&dev->work_lock, flags); } +void vhost_poll_queue(struct vhost_poll *poll) +{ + vhost_work_queue(poll->dev, &poll->work); +} + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev) return dev->mm == current->mm ? 0 : -EPERM; } +struct vhost_attach_cgroups_struct { + struct vhost_work work; + struct task_struct *owner; + int ret; +}; + +static void vhost_attach_cgroups_work(struct vhost_work *work) +{ + struct vhost_attach_cgroups_struct *s; + s = container_of(work, struct vhost_attach_cgroups_struct, work); + s->ret = cgroup_attach_task_all(s->owner, current); +} + +static int vhost_attach_cgroups(struct vhost_dev *dev) +{ + struct vhost_attach_cgroups_struct attach; + attach.owner = current; + vhost_work_init(&attach.work, vhost_attach_cgroups_work); + vhost_work_queue(dev, &attach.work); + vhost_work_flush(dev, &attach.work); + return attach.ret; +} + /* Caller should have device mutex */ static long vhost_dev_set_owner(struct vhost_dev *dev) { @@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev) } dev->worker = worker; - err = cgroup_attach_task_current_cg(worker); + wake_up_process(worker); /* avoid contributing to loadavg */ + + err = vhost_attach_cgroups(dev); if (err) goto err_cgroup; - wake_up_process(worker); /* avoid contributing to loadavg */ return 0; err_cgroup: kthread_stop(worker); + dev->worker = NULL; err_worker: if (dev->mm) mmput(dev->mm); diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c index c91a7f70f7b0..5d786bd3e304 100644 --- a/drivers/video/pxa168fb.c +++ b/drivers/video/pxa168fb.c @@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = { .fb_imageblit = cfb_imageblit, }; -static int __init pxa168fb_init_mode(struct fb_info *info, +static int __devinit pxa168fb_init_mode(struct fb_info *info, struct pxa168fb_mach_info *mi) { struct pxa168fb_info *fbi = info->par; @@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info, return ret; } -static int __init pxa168fb_probe(struct platform_device *pdev) +static int 
__devinit pxa168fb_probe(struct platform_device *pdev) { struct pxa168fb_mach_info *mi; struct fb_info *info = 0; @@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = { .probe = pxa168fb_probe, }; -static int __devinit pxa168fb_init(void) +static int __init pxa168fb_init(void) { return platform_driver_register(&pxa168fb_driver); } diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index a7528b913936..fd0cc0bf9a40 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void) { int err = register_filesystem(&bm_fs_type); if (!err) { - err = register_binfmt(&misc_format); + err = insert_binfmt(&misc_format); if (err) unregister_filesystem(&bm_fs_type); } diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 612a5c38d3c1..4d0ff5ee27b8 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio) /* Allocate kernel buffer for protection data */ len = sectors * blk_integrity_tuple_size(bi); - buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp); + buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); if (unlikely(buf == NULL)) { printk(KERN_ERR "could not allocate integrity buffer\n"); - return -EIO; + return -ENOMEM; } end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; diff --git a/fs/direct-io.c b/fs/direct-io.c index 51f270b479b6..48d74c7391d1 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio) int ret = 0; if (dio->bio) { - loff_t cur_offset = dio->block_in_file << dio->blkbits; + loff_t cur_offset = dio->cur_page_fs_offset; loff_t bio_next_offset = dio->logical_offset_in_bio + dio->bio->bi_size; @@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio) * Submit now if the underlying fs is about to perform a * metadata read */ - if (dio->boundary) + else if (dio->boundary) dio_bio_submit(dio); } diff --git a/fs/exec.c b/fs/exec.c index 2d9455282744..828dd2461d6b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max) argv++; if (i++ >= max) return -E2BIG; + + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; cond_resched(); } } @@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv, while (len > 0) { int offset, bytes_to_copy; + if (fatal_signal_pending(current)) { + ret = -ERESTARTNOHAND; + goto out; + } + cond_resched(); + offset = pos % PAGE_SIZE; if (offset == 0) offset = PAGE_SIZE; @@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm, #else stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); + + if (unlikely(stack_top < mmap_min_addr) || + unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) + return -ENOMEM; + stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; diff --git a/fs/fcntl.c b/fs/fcntl.c index 6769fd0f35b8..f8cc34f542c3 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync); static int __init fcntl_init(void) { - /* please add new bits here to ensure allocation uniqueness */ - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( + /* + * Please add new bits here to ensure allocation uniqueness. + * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY + * is defined as O_NONBLOCK on some platforms and not on others. 
+ */ + BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | - O_TRUNC | O_APPEND | O_NONBLOCK | + O_TRUNC | O_APPEND | /* O_NONBLOCK | */ __O_SYNC | O_DSYNC | FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7d9d06ba184b..81e086d8aa57 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -808,7 +808,7 @@ int bdi_writeback_thread(void *data) wb->last_active = jiffies; set_current_state(TASK_INTERRUPTIBLE); - if (!list_empty(&bdi->work_list)) { + if (!list_empty(&bdi->work_list) || kthread_should_stop()) { __set_current_state(TASK_RUNNING); continue; } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 69ad053ffd78..d367af1514ef 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc) * Called with fc->lock, unlocks it */ static void request_end(struct fuse_conn *fc, struct fuse_req *req) -__releases(&fc->lock) +__releases(fc->lock) { void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; req->end = NULL; @@ -306,8 +306,8 @@ __releases(&fc->lock) static void wait_answer_interruptible(struct fuse_conn *fc, struct fuse_req *req) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { if (signal_pending(current)) return; @@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) } static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { if (!fc->no_interrupt) { /* Any signal may interrupt this */ @@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc) /* Wait until a request is available on the pending list */ static void request_wait(struct fuse_conn *fc) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { DECLARE_WAITQUEUE(wait, current); @@ -934,7 +934,7 @@ __acquires(&fc->lock) */ static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, size_t nbytes, struct fuse_req *req) -__releases(&fc->lock) +__releases(fc->lock) { struct fuse_in_header ih; struct fuse_interrupt_in arg; @@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait) * This function releases and reacquires fc->lock */ static void end_requests(struct fuse_conn *fc, struct list_head *head) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { while (!list_empty(head)) { struct fuse_req *req; @@ -1744,8 +1744,8 @@ __acquires(&fc->lock) * locked). */ static void end_io_requests(struct fuse_conn *fc) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { while (!list_empty(&fc->io)) { struct fuse_req *req = @@ -1769,6 +1769,16 @@ __acquires(&fc->lock) } } +static void end_queued_requests(struct fuse_conn *fc) +__releases(fc->lock) +__acquires(fc->lock) +{ + fc->max_background = UINT_MAX; + flush_bg_queue(fc); + end_requests(fc, &fc->pending); + end_requests(fc, &fc->processing); +} + /* * Abort all requests. 
* @@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc) fc->connected = 0; fc->blocked = 0; end_io_requests(fc); - end_requests(fc, &fc->pending); - end_requests(fc, &fc->processing); + end_queued_requests(fc); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); @@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file) if (fc) { spin_lock(&fc->lock); fc->connected = 0; - end_requests(fc, &fc->pending); - end_requests(fc, &fc->processing); + fc->blocked = 0; + end_queued_requests(fc); + wake_up_all(&fc->blocked_waitq); spin_unlock(&fc->lock); fuse_conn_put(fc); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 147c1f71bdb9..c8224587123f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) /* Called under fc->lock, may release and reacquire it */ static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { struct fuse_inode *fi = get_fuse_inode(req->inode); loff_t size = i_size_read(req->inode); @@ -1183,8 +1183,8 @@ __acquires(&fc->lock) * Called with fc->lock */ void fuse_flush_writepages(struct inode *inode) -__releases(&fc->lock) -__acquires(&fc->lock) +__releases(fc->lock) +__acquires(fc->lock) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); diff --git a/fs/minix/namei.c b/fs/minix/namei.c index e20ee85955d1..f3f3578393a4 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) inode_inc_link_count(dir); - inode = minix_new_inode(dir, mode, &err); + inode = minix_new_inode(dir, S_IFDIR | mode, &err); if (!inode) goto out_dir; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3dfef0623968..cf0d2ffb3c84 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) { static int nfs4_access_to_omode(u32 access) { - switch (access) { + switch (access & NFS4_SHARE_ACCESS_BOTH) { case NFS4_SHARE_ACCESS_READ: return O_RDONLY; case NFS4_SHARE_ACCESS_WRITE: diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 215e12ce1d85..592fae5007d1 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, last_page_bytes = PAGE_ALIGN(end); index = start >> PAGE_CACHE_SHIFT; do { - pages[numpages] = grab_cache_page(mapping, index); + pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); if (!pages[numpages]) { ret = -ENOMEM; mlog_errno(ret); diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index ec6d12339593..c7ee03c22226 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, - "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", + "CRC32 failed: stored: 0x%x, computed 0x%x. 
Applying ECC.\n", (unsigned int)check.bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ @@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, goto out; } - mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", + mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", (unsigned int)check.bc_crc32e, (unsigned int)crc); rc = -EIO; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 81296b4e3646..9a03c151b5ce 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -36,6 +36,7 @@ #include <linux/writeback.h> #include <linux/falloc.h> #include <linux/quotaops.h> +#include <linux/blkdev.h> #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> @@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync) if (err) goto bail; - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { + /* + * We still have to flush drive's caches to get data to the + * platter + */ + if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) + blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, + NULL, BLKDEV_IFL_WAIT); goto bail; + } journal = osb->journal->j_journal; err = jbd2_journal_force_commit(journal); @@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); BUG_ON(abs_from & (inode->i_blkbits - 1)); - page = grab_cache_page(mapping, index); + page = find_or_create_page(mapping, index, GFP_NOFS); if (!page) { ret = -ENOMEM; mlog_errno(ret); @@ -2329,7 +2338,7 @@ out_dio: BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || - ((file->f_flags & O_DIRECT) && has_refcount)) { + ((file->f_flags & O_DIRECT) && !direct_io)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 0492464916b1..eece3e05d9d0 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode, OCFS2_BH_IGNORE_CACHE); } else { status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); - if (!status) + /* + * If buffer is in jbd, then its checksum may not have been + * computed as yet. + */ + if (!status && !buffer_jbd(bh)) status = ocfs2_validate_inode_block(osb->sb, bh); } if (status < 0) { diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index af2b8fe1f139..4c18f4ad93b4 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, /* * Another node might have truncated while we were waiting on * cluster locks. + * We don't check size == 0 before the shift. This is borrowed + * from do_generic_file_read. */ - last_index = size >> PAGE_CACHE_SHIFT; - if (page->index > last_index) { + last_index = (size - 1) >> PAGE_CACHE_SHIFT; + if (unlikely(!size || page->index > last_index)) { ret = -EINVAL; goto out; } @@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, * because the "write" would invalidate their data. 
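The grab_cache_page() to find_or_create_page() conversions in this diff (alloc.c and file.c above, refcounttree.c below) all exist for one reason: grab_cache_page() allocates with the mapping's default GFP mask, which normally permits __GFP_FS reclaim, and ocfs2 calls it while holding cluster locks. Passing GFP_NOFS explicitly keeps memory reclaim from re-entering the filesystem. A sketch of the safe variant:

    /* Returns the page locked, or NULL if allocation failed. */
    static struct page *demo_get_page_nofs(struct address_space *mapping,
                                           pgoff_t index)
    {
        return find_or_create_page(mapping, index, GFP_NOFS);
    }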
*/ if (page->index == last_index) - len = size & ~PAGE_CACHE_MASK; + len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, &fsdata, di_bh, page); diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index f171b51a74f7..a00dda2e4f16 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -472,32 +472,23 @@ leave: return status; } -static int ocfs2_mknod_locked(struct ocfs2_super *osb, - struct inode *dir, - struct inode *inode, - dev_t dev, - struct buffer_head **new_fe_bh, - struct buffer_head *parent_fe_bh, - handle_t *handle, - struct ocfs2_alloc_context *inode_ac) +static int __ocfs2_mknod_locked(struct inode *dir, + struct inode *inode, + dev_t dev, + struct buffer_head **new_fe_bh, + struct buffer_head *parent_fe_bh, + handle_t *handle, + struct ocfs2_alloc_context *inode_ac, + u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit) { int status = 0; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_dinode *fe = NULL; struct ocfs2_extent_list *fel; - u64 suballoc_loc, fe_blkno = 0; - u16 suballoc_bit; u16 feat; *new_fe_bh = NULL; - status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh, - inode_ac, &suballoc_loc, - &suballoc_bit, &fe_blkno); - if (status < 0) { - mlog_errno(status); - goto leave; - } - /* populate as many fields early on as possible - many of * these are used by the support functions here and in * callers. */ @@ -591,6 +582,34 @@ leave: return status; } +static int ocfs2_mknod_locked(struct ocfs2_super *osb, + struct inode *dir, + struct inode *inode, + dev_t dev, + struct buffer_head **new_fe_bh, + struct buffer_head *parent_fe_bh, + handle_t *handle, + struct ocfs2_alloc_context *inode_ac) +{ + int status = 0; + u64 suballoc_loc, fe_blkno = 0; + u16 suballoc_bit; + + *new_fe_bh = NULL; + + status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh, + inode_ac, &suballoc_loc, + &suballoc_bit, &fe_blkno); + if (status < 0) { + mlog_errno(status); + return status; + } + + return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, + parent_fe_bh, handle, inode_ac, + fe_blkno, suballoc_loc, suballoc_bit); +} + static int ocfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) @@ -1852,61 +1871,117 @@ bail: return status; } -static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, - struct inode **ret_orphan_dir, - u64 blkno, - char *name, - struct ocfs2_dir_lookup_result *lookup) +static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb, + struct inode **ret_orphan_dir, + struct buffer_head **ret_orphan_dir_bh) { struct inode *orphan_dir_inode; struct buffer_head *orphan_dir_bh = NULL; - int status = 0; - - status = ocfs2_blkno_stringify(blkno, name); - if (status < 0) { - mlog_errno(status); - return status; - } + int ret = 0; orphan_dir_inode = ocfs2_get_system_file_inode(osb, ORPHAN_DIR_SYSTEM_INODE, osb->slot_num); if (!orphan_dir_inode) { - status = -ENOENT; - mlog_errno(status); - return status; + ret = -ENOENT; + mlog_errno(ret); + return ret; } mutex_lock(&orphan_dir_inode->i_mutex); - status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); - if (status < 0) { - mlog_errno(status); - goto leave; + ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); + if (ret < 0) { + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); + + mlog_errno(ret); + return ret; } - status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, - orphan_dir_bh, name, - OCFS2_ORPHAN_NAMELEN, lookup); - if (status < 0) { - ocfs2_inode_unlock(orphan_dir_inode, 1); + *ret_orphan_dir = 
orphan_dir_inode; + *ret_orphan_dir_bh = orphan_dir_bh; - mlog_errno(status); - goto leave; + return 0; +} + +static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode, + struct buffer_head *orphan_dir_bh, + u64 blkno, + char *name, + struct ocfs2_dir_lookup_result *lookup) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb); + + ret = ocfs2_blkno_stringify(blkno, name); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, + orphan_dir_bh, name, + OCFS2_ORPHAN_NAMELEN, lookup); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + return 0; +} + +/** + * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for + * insertion of an orphan. + * @osb: ocfs2 file system + * @ret_orphan_dir: Orphan dir inode - returned locked! + * @blkno: Actual block number of the inode to be inserted into orphan dir. + * @lookup: dir lookup result, to be passed back into functions like + * ocfs2_orphan_add + * + * Returns zero on success and the ret_orphan_dir, name and lookup + * fields will be populated. + * + * Returns non-zero on failure. + */ +static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, + struct inode **ret_orphan_dir, + u64 blkno, + char *name, + struct ocfs2_dir_lookup_result *lookup) +{ + struct inode *orphan_dir_inode = NULL; + struct buffer_head *orphan_dir_bh = NULL; + int ret = 0; + + ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode, + &orphan_dir_bh); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh, + blkno, name, lookup); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + *ret_orphan_dir = orphan_dir_inode; -leave: - if (status) { +out: + brelse(orphan_dir_bh); + + if (ret) { + ocfs2_inode_unlock(orphan_dir_inode, 1); mutex_unlock(&orphan_dir_inode->i_mutex); iput(orphan_dir_inode); } - brelse(orphan_dir_bh); - - mlog_exit(status); - return status; + mlog_exit(ret); + return ret; } static int ocfs2_orphan_add(struct ocfs2_super *osb, @@ -2053,6 +2128,99 @@ leave: return status; } +/** + * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly + * allocated file. This is different from the typical 'add to orphan dir' + * operation in that the inode does not yet exist. This is a problem because + * the orphan dir stringifies the inode block number to come up with its + * dirent. Obviously if the inode does not yet exist we have a chicken and egg + * problem. This function works around it by calling deeper into the orphan + * and suballoc code than other callers. Use this only by necessity. + * @dir: The directory which this inode will ultimately wind up under - not the + * orphan dir! + * @dir_bh: buffer_head the @dir inode block + * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled + * with the string to be used for orphan dirent. Pass back to the orphan dir + * code. + * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan + * dir code. + * @ret_di_blkno: block number where the new inode will be allocated. + * @orphan_insert: Dir insert context to be passed back into orphan dir code. + * @ret_inode_ac: Inode alloc context to be passed back to the allocator. + * + * Returns zero on success and the output parameters above will be populated. + * + * Returns non-zero on failure. + */
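The chicken-and-egg problem described above comes from the orphan dirent being named after the inode's block number. Assuming the format matches ocfs2_blkno_stringify(), a zero-padded 16-digit hex rendering of the block number (an assumption here, not spelled out in this hunk), the naming step reduces to:

    /* Hypothetical sketch of the naming rule; not the ocfs2 function. */
    static int demo_blkno_stringify(u64 blkno, char *name, size_t size)
    {
        int namelen = snprintf(name, size, "%016llx",
                               (unsigned long long)blkno);

        if (namelen <= 0 || (size_t)namelen >= size)
            return -EINVAL;
        return 0;
    }

Hence the new flow: pick the block number first (ocfs2_find_new_inode_loc()), derive the dirent name from it, and only then claim the inode at that block.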
+static int ocfs2_prep_new_orphaned_file(struct inode *dir, + struct buffer_head *dir_bh, + char *orphan_name, + struct inode **ret_orphan_dir, + u64 *ret_di_blkno, + struct ocfs2_dir_lookup_result *orphan_insert, + struct ocfs2_alloc_context **ret_inode_ac) +{ + int ret; + u64 di_blkno; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + struct inode *orphan_dir = NULL; + struct buffer_head *orphan_dir_bh = NULL; + struct ocfs2_alloc_context *inode_ac = NULL; + + ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + /* reserve an inode spot */ + ret = ocfs2_reserve_new_inode(osb, &inode_ac); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac, + &di_blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh, + di_blkno, orphan_name, orphan_insert); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + +out: + if (ret == 0) { + *ret_orphan_dir = orphan_dir; + *ret_di_blkno = di_blkno; + *ret_inode_ac = inode_ac; + /* + * orphan_name and orphan_insert are already up to + * date via prepare_orphan_dir + */ + } else { + /* Unroll reserve_new_inode* */ + if (inode_ac) + ocfs2_free_alloc_context(inode_ac); + + /* Unroll orphan dir locking */ + mutex_unlock(&orphan_dir->i_mutex); + ocfs2_inode_unlock(orphan_dir, 1); + iput(orphan_dir); + } + + brelse(orphan_dir_bh); + + return ret; +} + int ocfs2_create_inode_in_orphan(struct inode *dir, int mode, struct inode **new_inode) @@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, struct buffer_head *new_di_bh = NULL; struct ocfs2_alloc_context *inode_ac = NULL; struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; + u64 uninitialized_var(di_blkno), suballoc_loc; + u16 suballoc_bit; status = ocfs2_inode_lock(dir, &parent_di_bh, 1); if (status < 0) { @@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, return status; } - /* - * We give the orphan dir the root blkno to fake an orphan name, - * and allocate enough space for our insertion. - */ - status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, - osb->root_blkno, - orphan_name, &orphan_insert); - if (status < 0) { - mlog_errno(status); - goto leave; - } - - /* reserve an inode spot */ - status = ocfs2_reserve_new_inode(osb, &inode_ac); + status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh, + orphan_name, &orphan_dir, + &di_blkno, &orphan_insert, &inode_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, goto leave; } did_quota_inode = 1; - inode->i_nlink = 0; - /* do the real work now. */ - status = ocfs2_mknod_locked(osb, dir, inode, - 0, &new_di_bh, parent_di_bh, handle, - inode_ac); + status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac, + &suballoc_loc, + &suballoc_bit, di_blkno); if (status < 0) { mlog_errno(status); goto leave; } - status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name); + inode->i_nlink = 0; + /* do the real work now.
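ocfs2_prep_new_orphaned_file() above also demonstrates the release-in-reverse unwind discipline: lock, then reserve, then locate, and on failure undo exactly the steps already taken. The skeleton, with every demo_* helper hypothetical:

    struct demo_ctx;

    extern int  demo_lock_orphan_dir(struct demo_ctx *ctx);    /* hypothetical */
    extern void demo_unlock_orphan_dir(struct demo_ctx *ctx);  /* hypothetical */
    extern int  demo_reserve_inode(struct demo_ctx *ctx);      /* hypothetical */
    extern void demo_unreserve_inode(struct demo_ctx *ctx);    /* hypothetical */
    extern int  demo_find_inode_loc(struct demo_ctx *ctx);     /* hypothetical */

    static int demo_prep(struct demo_ctx *ctx)
    {
        int ret;

        ret = demo_lock_orphan_dir(ctx);       /* acquire 1 */
        if (ret)
            return ret;

        ret = demo_reserve_inode(ctx);         /* acquire 2 */
        if (ret)
            goto out_unlock;

        ret = demo_find_inode_loc(ctx);
        if (ret)
            goto out_unreserve;

        return 0;                              /* caller now owns 1 and 2 */

    out_unreserve:
        demo_unreserve_inode(ctx);             /* release 2 */
    out_unlock:
        demo_unlock_orphan_dir(ctx);           /* release 1 */
        return ret;                            /* propagate the real error */
    }

Propagating ret at the end is the point: returning 0 unconditionally would tell the caller the setup succeeded right after it was unrolled.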
*/ + status = __ocfs2_mknod_locked(dir, inode, + 0, &new_di_bh, parent_di_bh, handle, + inode_ac, di_blkno, suballoc_loc, + suballoc_bit); if (status < 0) { mlog_errno(status); goto leave; diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 73a11ccfd4c2..0afeda83120f 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, if (map_end & (PAGE_CACHE_SIZE - 1)) to = map_end & (PAGE_CACHE_SIZE - 1); - page = grab_cache_page(mapping, page_index); + page = find_or_create_page(mapping, page_index, GFP_NOFS); /* * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page @@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, if (map_end > end) map_end = end; - page = grab_cache_page(context->inode->i_mapping, page_index); + page = find_or_create_page(context->inode->i_mapping, + page_index, GFP_NOFS); BUG_ON(!page); wait_on_page_writeback(page); diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index a8e6a95a353f..8a286f54dca1 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -57,11 +57,28 @@ struct ocfs2_suballoc_result { u64 sr_bg_blkno; /* The bg we allocated from. Set to 0 when a block group is contiguous. */ + u64 sr_bg_stable_blkno; /* + * Doesn't change, always + * set to target block + * group descriptor + * block. + */ u64 sr_blkno; /* The first allocated block */ unsigned int sr_bit_offset; /* The bit in the bg */ unsigned int sr_bits; /* How many bits we claimed */ }; +static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res) +{ + if (res->sr_blkno == 0) + return 0; + + if (res->sr_bg_blkno) + return res->sr_bg_blkno; + + return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset); +} + static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); @@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac) brelse(ac->ac_bh); ac->ac_bh = NULL; ac->ac_resv = NULL; + if (ac->ac_find_loc_priv) { + kfree(ac->ac_find_loc_priv); + ac->ac_find_loc_priv = NULL; + } } void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) @@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, if (!ret) ocfs2_bg_discontig_fix_result(ac, gd, res); + /* + * sr_bg_blkno might have been changed by + * ocfs2_bg_discontig_fix_result + */ + res->sr_bg_stable_blkno = group_bh->b_blocknr; + + if (ac->ac_find_loc_only) + goto out_loc_only; + ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, res->sr_bits, le16_to_cpu(gd->bg_chain)); @@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, if (ret < 0) mlog_errno(ret); +out_loc_only: *bits_left = le16_to_cpu(gd->bg_free_bits_count); out: @@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, { int status; u16 chain; - u32 tmp_used; u64 next_group; struct inode *alloc_inode = ac->ac_inode; struct buffer_head *group_bh = NULL; @@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, if (!status) ocfs2_bg_discontig_fix_result(ac, bg, res); + /* + * sr_bg_blkno might have been changed by + * ocfs2_bg_discontig_fix_result + */ + res->sr_bg_stable_blkno = group_bh->b_blocknr; /* * Keep track of previous block descriptor read. 
When @@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, } } - /* Ok, claim our bits now: set the info on dinode, chainlist - * and then the group */ - status = ocfs2_journal_access_di(handle, - INODE_CACHE(alloc_inode), - ac->ac_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (status < 0) { + if (ac->ac_find_loc_only) + goto out_loc_only; + + status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, + ac->ac_bh, res->sr_bits, + chain); + if (status) { mlog_errno(status); goto bail; } - tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used); - fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used); - le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits); - ocfs2_journal_dirty(handle, ac->ac_bh); - status = ocfs2_block_group_set_bits(handle, alloc_inode, bg, @@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, (unsigned long long)le64_to_cpu(fe->i_blkno)); +out_loc_only: *bits_left = le16_to_cpu(bg->bg_free_bits_count); bail: brelse(group_bh); @@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, int status; u16 victim, i; u16 bits_left = 0; + u64 hint = ac->ac_last_group; struct ocfs2_chain_list *cl; struct ocfs2_dinode *fe; @@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, goto bail; } - res->sr_bg_blkno = ac->ac_last_group; + res->sr_bg_blkno = hint; if (res->sr_bg_blkno) { /* Attempt to short-circuit the usual search mechanism * by jumping straight to the most recently used @@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, res, &bits_left); - if (!status) + if (!status) { + hint = ocfs2_group_from_res(res); goto set_hint; + } if (status < 0 && status != -ENOSPC) { mlog_errno(status); goto bail; @@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, ac->ac_chain = i; status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, res, &bits_left); - if (!status) + if (!status) { + hint = ocfs2_group_from_res(res); break; + } if (status < 0 && status != -ENOSPC) { mlog_errno(status); goto bail; @@ -1936,7 +1972,7 @@ set_hint: if (bits_left < min_bits) ac->ac_last_group = 0; else - ac->ac_last_group = res->sr_bg_blkno; + ac->ac_last_group = hint; } bail: @@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir, OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; } +int ocfs2_find_new_inode_loc(struct inode *dir, + struct buffer_head *parent_fe_bh, + struct ocfs2_alloc_context *ac, + u64 *fe_blkno) +{ + int ret; + handle_t *handle = NULL; + struct ocfs2_suballoc_result *res; + + BUG_ON(!ac); + BUG_ON(ac->ac_bits_given != 0); + BUG_ON(ac->ac_bits_wanted != 1); + BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE); + + res = kzalloc(sizeof(*res), GFP_NOFS); + if (res == NULL) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac); + + /* + * The handle started here is for chain relink. Alternatively, + * we could just disable relink for these calls. + */ + handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + handle = NULL; + mlog_errno(ret); + goto out; + } + + /* + * This will instruct ocfs2_claim_suballoc_bits and + * ocfs2_search_one_group to search but save actual allocation + * for later. 
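The ac_find_loc_only checks above implement a dry-run mode: the bitmap search runs as usual, but the goto skips the metadata updates (dinode counts and group bits), so the caller learns where the allocation would land without committing to it. Stripped to its skeleton, with hypothetical demo_* names:

    struct demo_alloc {
        int find_loc_only;        /* 1 = search only, claim later */
        unsigned long bit;        /* out: chosen bit in the group */
    };

    static int demo_search_group(struct demo_alloc *ac,
                                 const unsigned long *bitmap,
                                 unsigned long nbits)
    {
        unsigned long bit = find_first_zero_bit(bitmap, nbits);

        if (bit >= nbits)
            return -ENOSPC;

        ac->bit = bit;
        if (ac->find_loc_only)
            return 0;             /* record the location, dirty nothing */

        /* normal path: set the bit and journal the updates here */
        return 0;
    }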
+ */ + ac->ac_find_loc_only = 1; + + ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ac->ac_find_loc_priv = res; + *fe_blkno = res->sr_blkno; + +out: + if (handle) + ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle); + + if (ret) + kfree(res); + + return ret; +} + +int ocfs2_claim_new_inode_at_loc(handle_t *handle, + struct inode *dir, + struct ocfs2_alloc_context *ac, + u64 *suballoc_loc, + u16 *suballoc_bit, + u64 di_blkno) +{ + int ret; + u16 chain; + struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv; + struct buffer_head *bg_bh = NULL; + struct ocfs2_group_desc *bg; + struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data; + + /* + * Since di_blkno is being passed back in, we check for any + * inconsistencies which may have happened between + * calls. These are code bugs as di_blkno is not expected to + * change once returned from ocfs2_find_new_inode_loc() + */ + BUG_ON(res->sr_blkno != di_blkno); + + ret = ocfs2_read_group_descriptor(ac->ac_inode, di, + res->sr_bg_stable_blkno, &bg_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + bg = (struct ocfs2_group_desc *) bg_bh->b_data; + chain = le16_to_cpu(bg->bg_chain); + + ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle, + ac->ac_bh, res->sr_bits, + chain); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_block_group_set_bits(handle, + ac->ac_inode, + bg, + bg_bh, + res->sr_bit_offset, + res->sr_bits); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, + (unsigned long long)di_blkno); + + atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + BUG_ON(res->sr_bits != 1); + + *suballoc_loc = res->sr_bg_blkno; + *suballoc_bit = res->sr_bit_offset; + ac->ac_bits_given++; + ocfs2_save_inode_ac_group(dir, ac); + +out: + brelse(bg_bh); + + return ret; +} + int ocfs2_claim_new_inode(handle_t *handle, struct inode *dir, struct buffer_head *parent_fe_bh, @@ -2567,7 +2733,8 @@ out: * suballoc_bit. */ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, - u16 *suballoc_slot, u16 *suballoc_bit) + u16 *suballoc_slot, u64 *group_blkno, + u16 *suballoc_bit) { int status; struct buffer_head *inode_bh = NULL; @@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); if (suballoc_bit) *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); + if (group_blkno) + *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc); bail: brelse(inode_bh); @@ -2621,7 +2790,8 @@ bail: */ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, struct inode *suballoc, - struct buffer_head *alloc_bh, u64 blkno, + struct buffer_head *alloc_bh, + u64 group_blkno, u64 blkno, u16 bit, int *res) { struct ocfs2_dinode *alloc_di; @@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, goto bail; } - if (alloc_di->i_suballoc_loc) - bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc); - else - bg_blkno = ocfs2_which_suballoc_group(blkno, bit); + bg_blkno = group_blkno ? 
group_blkno : + ocfs2_which_suballoc_group(blkno, bit); status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, &group_bh); if (status < 0) { @@ -2680,6 +2848,7 @@ bail: int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) { int status; + u64 group_blkno = 0; u16 suballoc_bit = 0, suballoc_slot = 0; struct inode *inode_alloc_inode; struct buffer_head *alloc_bh = NULL; @@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) mlog_entry("blkno: %llu", (unsigned long long)blkno); status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, - &suballoc_bit); + &group_blkno, &suballoc_bit); if (status < 0) { mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); goto bail; @@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) } status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, - blkno, suballoc_bit, res); + group_blkno, blkno, suballoc_bit, res); if (status < 0) mlog(ML_ERROR, "test suballoc bit failed %d\n", status); diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index a017dd3ee7d9..b8afabfeede4 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -56,6 +56,9 @@ struct ocfs2_alloc_context { u64 ac_max_block; /* Highest block number to allocate. 0 is is the same as ~0 - unlimited */ + int ac_find_loc_only; /* hack for reflink operation ordering */ + struct ocfs2_suballoc_result *ac_find_loc_priv; /* */ + struct ocfs2_alloc_reservation *ac_resv; }; @@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et, struct ocfs2_alloc_context **meta_ac); int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); + + + +/* + * The following two interfaces are for ocfs2_create_inode_in_orphan(). 
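The one-line memset() added to xfs_ioc_fsgetxattr() above is the standard fix for a kernel-to-user infoleak: any struct copied to user space must have its padding and unwritten fields zeroed first, or stale stack bytes escape. The generic shape of the fix, on a demo struct rather than the xfs one:

    struct demo_attr {
        u32 flags;
        u32 extsize;
        u32 reserved[4];             /* never written below */
    };

    static int demo_getattr(void __user *arg)
    {
        struct demo_attr da;

        memset(&da, 0, sizeof(da));  /* clears padding + reserved fields */
        da.flags = 0x1;
        da.extsize = 4096;

        if (copy_to_user(arg, &da, sizeof(da)))
            return -EFAULT;
        return 0;
    }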
+ */ +int ocfs2_find_new_inode_loc(struct inode *dir, + struct buffer_head *parent_fe_bh, + struct ocfs2_alloc_context *ac, + u64 *fe_blkno); + +int ocfs2_claim_new_inode_at_loc(handle_t *handle, + struct inode *dir, + struct ocfs2_alloc_context *ac, + u64 *suballoc_loc, + u16 *suballoc_bit, + u64 di_blkno); + #endif /* _CHAINALLOC_H_ */ diff --git a/fs/proc/page.c b/fs/proc/page.c index 180cf5a0bd67..3b8b45660331 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page) u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); #endif -#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR +#ifdef CONFIG_ARCH_USES_PG_UNCACHED u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); #endif diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 439fc1f1c1c4..271afc48b9a5 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; if (vma->vm_flags & VM_GROWSDOWN) - start += PAGE_SIZE; + if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) + start += PAGE_SIZE; seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", start, diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index d72cf2bb054a..286e36e21dae 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1932,7 +1932,8 @@ xfs_buf_init(void) if (!xfs_buf_zone) goto out; - xfslogd_workqueue = create_workqueue("xfslogd"); + xfslogd_workqueue = alloc_workqueue("xfslogd", + WQ_RESCUER | WQ_HIGHPRI, 1); if (!xfslogd_workqueue) goto out_free_buf_zone; diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 4fec427b83ef..3b9e626f7cd1 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr( { struct fsxattr fa; + memset(&fa, 0, sizeof(struct fsxattr)); + xfs_ilock(ip, XFS_ILOCK_SHARED); fa.fsx_xflags = xfs_ip2xflags(ip); fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index c7376bf80b06..8ca18e26d7e3 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -16,15 +16,27 @@ * While the GPIO programming interface defines valid GPIO numbers * to be in the range 0..MAX_INT, this library restricts them to the * smaller range 0..ARCH_NR_GPIOS-1. + * + * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of + * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is + * actually an estimate of a board-specific value. */ #ifndef ARCH_NR_GPIOS #define ARCH_NR_GPIOS 256 #endif +/* + * "valid" GPIO numbers are nonnegative and may be passed to + * setup routines like gpio_request(). only some valid numbers + * can successfully be requested and used. + * + * Invalid GPIO numbers are useful for indicating no-such-GPIO in + * platform data and other tables. 
+ */ + static inline int gpio_is_valid(int number) { - /* only some non-negative numbers are valid */ return ((unsigned)number) < ARCH_NR_GPIOS; } diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed3e92e41c6e..0c991023ee47 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); int cgroup_scan_tasks(struct cgroup_scanner *scan); int cgroup_attach_task(struct cgroup *, struct task_struct *); -int cgroup_attach_task_current_cg(struct task_struct *); +int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); + +static inline int cgroup_attach_task_current_cg(struct task_struct *tsk) +{ + return cgroup_attach_task_all(current, tsk); +} /* * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works @@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats, } /* No cgroups - nothing to do */ +static inline int cgroup_attach_task_all(struct task_struct *from, + struct task_struct *t) +{ + return 0; +} static inline int cgroup_attach_task_current_cg(struct task_struct *t) { return 0; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2c958f4fce1e..926b50322a46 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); extern void elevator_exit(struct elevator_queue *); +extern int elevator_change(struct request_queue *, const char *); extern int elv_rq_merge_ok(struct request *, struct bio *); /* diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h index ee3049cb9ba5..52baa79d69a7 100644 --- a/include/linux/i2c/sx150x.h +++ b/include/linux/i2c/sx150x.h @@ -63,6 +63,9 @@ * IRQ lines will appear. Similarly to gpio_base, the expander * will create a block of irqs beginning at this number. * This value is ignored if irq_summary is < 0. + * @reset_during_probe: If set to true, the driver will trigger a full + * reset of the chip at the beginning of the probe + * in order to place it in a known state. 
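gpio_is_valid() above relies on a common range-check idiom: casting the signed value to unsigned folds the negative test into a single comparison, because negative inputs wrap to values far above any sane bound. In isolation:

    static inline int demo_in_range(int number, unsigned int limit)
    {
        /* number < 0 becomes a huge unsigned value and fails the test */
        return ((unsigned int)number) < limit;
    }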
*/ struct sx150x_platform_data { unsigned gpio_base; @@ -73,6 +76,7 @@ struct sx150x_platform_data { u16 io_polarity; int irq_summary; unsigned irq_base; + bool reset_during_probe; }; #endif /* __LINUX_I2C_SX150X_H */ diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0a6b3d5c490c..7fb592793738 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping) } /* Atomic map/unmap */ -static inline void * +static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset, int slot) @@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, } static inline void -io_mapping_unmap_atomic(void *vaddr, int slot) +io_mapping_unmap_atomic(void __iomem *vaddr, int slot) { iounmap_atomic(vaddr, slot); } -static inline void * +static inline void __iomem * io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) { resource_size_t phys_addr; @@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) } static inline void -io_mapping_unmap(void *vaddr) +io_mapping_unmap(void __iomem *vaddr) { iounmap(vaddr); } @@ -125,38 +125,38 @@ struct io_mapping; static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { - return (struct io_mapping *) ioremap_wc(base, size); + return (struct io_mapping __force *) ioremap_wc(base, size); } static inline void io_mapping_free(struct io_mapping *mapping) { - iounmap(mapping); + iounmap((void __force __iomem *) mapping); } /* Atomic map/unmap */ -static inline void * +static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset, int slot) { - return ((char *) mapping) + offset; + return ((char __force __iomem *) mapping) + offset; } static inline void -io_mapping_unmap_atomic(void *vaddr, int slot) +io_mapping_unmap_atomic(void __iomem *vaddr, int slot) { } /* Non-atomic map/unmap */ -static inline void * +static inline void __iomem * io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) { - return ((char *) mapping) + offset; + return ((char __force __iomem *) mapping) + offset; } static inline void -io_mapping_unmap(void *vaddr) +io_mapping_unmap(void __iomem *vaddr) { } diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 4aa95f203f3e..62dbee554f60 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val) */ #define kfifo_reset(fifo) \ (void)({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ __tmp->kfifo.in = __tmp->kfifo.out = 0; \ }) @@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val) */ #define kfifo_reset_out(fifo) \ (void)({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ __tmp->kfifo.out = __tmp->kfifo.in; \ }) @@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val) */ #define kfifo_len(fifo) \ ({ \ - typeof(fifo + 1) __tmpl = (fifo); \ + typeof((fifo) + 1) __tmpl = (fifo); \ __tmpl->kfifo.in - __tmpl->kfifo.out; \ }) @@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val) */ #define kfifo_is_empty(fifo) \ ({ \ - typeof(fifo + 1) __tmpq = (fifo); \ + typeof((fifo) + 1) __tmpq = (fifo); \ __tmpq->kfifo.in == __tmpq->kfifo.out; \ }) @@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val) */ #define kfifo_is_full(fifo) \ ({ \ - typeof(fifo + 1) __tmpq = (fifo); \ + typeof((fifo) + 1) __tmpq = (fifo); \ 
kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ }) @@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val) #define kfifo_avail(fifo) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmpq = (fifo); \ + typeof((fifo) + 1) __tmpq = (fifo); \ const size_t __recsize = sizeof(*__tmpq->rectype); \ unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ (__recsize) ? ((__avail <= __recsize) ? 0 : \ @@ -284,7 +284,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_skip(fifo) \ (void)({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ if (__recsize) \ @@ -302,7 +302,7 @@ __kfifo_must_check_helper( \ #define kfifo_peek_len(fifo) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ @@ -325,7 +325,7 @@ __kfifo_must_check_helper( \ #define kfifo_alloc(fifo, size, gfp_mask) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ __is_kfifo_ptr(__tmp) ? \ __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ @@ -339,7 +339,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_free(fifo) \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ if (__is_kfifo_ptr(__tmp)) \ __kfifo_free(__kfifo); \ @@ -358,7 +358,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_init(fifo, buffer, size) \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ __is_kfifo_ptr(__tmp) ? 
\ __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ @@ -379,8 +379,8 @@ __kfifo_must_check_helper( \ */ #define kfifo_put(fifo, val) \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ - typeof(val + 1) __val = (val); \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((val) + 1) __val = (val); \ unsigned int __ret; \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -421,8 +421,8 @@ __kfifo_must_check_helper( \ #define kfifo_get(fifo, val) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ - typeof(val + 1) __val = (val); \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((val) + 1) __val = (val); \ unsigned int __ret; \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -462,8 +462,8 @@ __kfifo_must_check_helper( \ #define kfifo_peek(fifo, val) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ - typeof(val + 1) __val = (val); \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((val) + 1) __val = (val); \ unsigned int __ret; \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -501,8 +501,8 @@ __kfifo_must_check_helper( \ */ #define kfifo_in(fifo, buf, n) \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ - typeof(buf + 1) __buf = (buf); \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((buf) + 1) __buf = (buf); \ unsigned long __n = (n); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -554,8 +554,8 @@ __kfifo_must_check_helper( \ #define kfifo_out(fifo, buf, n) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ - typeof(buf + 1) __buf = (buf); \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((buf) + 1) __buf = (buf); \ unsigned long __n = (n); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -611,7 +611,7 @@ __kfifo_must_check_helper( \ #define kfifo_from_user(fifo, from, len, copied) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ const void __user *__from = (from); \ unsigned int __len = (len); \ unsigned int *__copied = (copied); \ @@ -639,7 +639,7 @@ __kfifo_must_check_helper( \ #define kfifo_to_user(fifo, to, len, copied) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ void __user *__to = (to); \ unsigned int __len = (len); \ unsigned int *__copied = (copied); \ @@ -666,7 +666,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ struct scatterlist *__sgl = (sgl); \ int __nents = (nents); \ unsigned int __len = (len); \ @@ -690,7 +690,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_dma_in_finish(fifo, len) \ (void)({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ unsigned int __len = (len); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -717,7 +717,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ + typeof((fifo) + 1) __tmp = (fifo); \ struct scatterlist *__sgl = (sgl); \ int __nents = (nents); \ unsigned int __len = (len); \ @@ -741,7 +741,7 @@ __kfifo_must_check_helper( \ */ #define kfifo_dma_out_finish(fifo, len) \ (void)({ \ - typeof(fifo + 1) __tmp = (fifo); \ + 
typeof((fifo) + 1) __tmp = (fifo); \ unsigned int __len = (len); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ @@ -766,8 +766,8 @@ __kfifo_must_check_helper( \ #define kfifo_out_peek(fifo, buf, n) \ __kfifo_must_check_helper( \ ({ \ - typeof(fifo + 1) __tmp = (fifo); \ - typeof(buf + 1) __buf = (buf); \ + typeof((fifo) + 1) __tmp = (fifo); \ + typeof((buf) + 1) __buf = (buf); \ unsigned long __n = (n); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 74d691ee9121..3319a6967626 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -16,6 +16,9 @@ struct stable_node; struct mem_cgroup; +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address); + #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); @@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page, * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? */ -struct page *ksm_does_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address); -static inline struct page *ksm_might_need_to_copy(struct page *page, +static inline int ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct anon_vma *anon_vma = page_anon_vma(page); - if (!anon_vma || - (anon_vma->root == vma->anon_vma->root && - page->index == linear_page_index(vma, address))) - return page; - - return ksm_does_need_to_copy(page, vma, address); + return anon_vma && + (anon_vma->root != vma->anon_vma->root || + page->index != linear_page_index(vma, address)); } int page_referenced_ksm(struct page *page, @@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; } -static inline struct page *ksm_might_need_to_copy(struct page *page, +static inline int ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { - return page; + return 0; } static inline int page_referenced_ksm(struct page *page, diff --git a/include/linux/lglock.h b/include/linux/lglock.h index b288cb713b90..f549056fb20b 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -150,7 +150,7 @@ int i; \ preempt_disable(); \ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_online_cpu(i) { \ + for_each_possible_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_lock(lock); \ @@ -161,7 +161,7 @@ void name##_global_unlock(void) { \ int i; \ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_online_cpu(i) { \ + for_each_possible_cpu(i) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_unlock(lock); \ diff --git a/include/linux/libata.h b/include/linux/libata.h index f010f18a0f86..45fb2967b66d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -335,6 +335,7 @@ enum { ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ ATA_EHI_QUIET = (1 << 3), /* be quiet */ + ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ @@ -723,6 +724,7 @@ struct ata_port { struct 
ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ u8 ctl; /* cache of ATA control register */ u8 last_ctl; /* Cache last written value */ + struct ata_link* sff_pio_task_link; /* link currently used */ struct delayed_work sff_pio_task; #ifdef CONFIG_ATA_BMDMA struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ @@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap); extern void ata_sff_irq_clear(struct ata_port *ap); extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq); -extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); +extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); extern unsigned int ata_sff_port_intr(struct ata_port *ap, diff --git a/include/linux/mm.h b/include/linux/mm.h index e6b1210772ce..74949fbef8c6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -864,6 +864,12 @@ int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); +/* Is the vma a continuation of the stack vma above it? */ +static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +{ + return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +} + extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 329a8faa6e37..245cdacee544 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -38,6 +38,8 @@ * [8:0] Byte/block count */ +#define R4_MEMORY_PRESENT (1 << 27) + /* SDIO status in R5 Type diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6e6e62648a4d..3984c4eb41fd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -284,6 +284,13 @@ struct zone { unsigned long watermark[NR_WMARK]; /* + * When free pages are below this point, additional steps are taken + * when reading the number of free pages to avoid per-cpu counter + * drift allowing watermarks to be breached + */ + unsigned long percpu_drift_mark; + + /* * We don't know if the memory that we're going to allocate will be freeable * or/and it will be released eventually, so to avoid totally wasting several * GB of ram we must reserve some of the lower zone memory (otherwise we risk @@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone) return test_bit(ZONE_OOM_LOCKED, &zone->flags); } +#ifdef CONFIG_SMP +unsigned long zone_nr_free_pages(struct zone *zone); +#else +#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES) +#endif /* CONFIG_SMP */ + /* * The "priority" of VM scanning is how much of the queues we will scan in one * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 878cab4f5fcc..f363bc8fdc74 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -78,6 +78,14 @@ struct mutex_waiter { # include <linux/mutex-debug.h> #else # define __DEBUG_MUTEX_INITIALIZER(lockname) +/** + * mutex_init - initialize the mutex + * @mutex: the mutex to be initialized + * + * Initialize the mutex to unlocked state. + * + * It is not allowed to initialize an already locked mutex. 
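vma_stack_continue() above pairs with the fs/proc/task_mmu.c hunk earlier in this diff: the guard page should be hidden from /proc/<pid>/maps only for the bottom-most stack VMA, not for a VMA that merely continues a grow-down stack above it. Roughly how the reader uses it (illustrative, mirroring show_map_vma()):

    static unsigned long demo_map_start(struct vm_area_struct *vma)
    {
        unsigned long start = vma->vm_start;

        if ((vma->vm_flags & VM_GROWSDOWN) &&
            !vma_stack_continue(vma->vm_prev, vma->vm_start))
            start += PAGE_SIZE;   /* skip the synthetic guard page */

        return start;
    }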
+ */ # define mutex_init(mutex) \ do { \ static struct lock_class_key __key; \ diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 7415839ac890..5310d27abd2a 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h @@ -26,6 +26,9 @@ struct semaphore { .wait_list = LIST_HEAD_INIT((name).wait_list), \ } +#define DEFINE_SEMAPHORE(name) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) + #define DECLARE_MUTEX(name) \ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) diff --git a/include/linux/swap.h b/include/linux/swap.h index 2fee51a11b73..7cdd63366f88 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -19,6 +19,7 @@ struct bio; #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff #define SWAP_FLAG_PRIO_SHIFT 0 +#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ static inline int current_is_kswapd(void) { @@ -142,7 +143,7 @@ struct swap_extent { enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ - SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ + SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ @@ -315,6 +316,7 @@ extern long nr_swap_pages; extern long total_swap_pages; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); +extern swp_entry_t get_swap_page_of_type(int); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); @@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); struct backing_dev_info; -#ifdef CONFIG_HIBERNATION -void hibernation_freeze_swap(void); -void hibernation_thaw_swap(void); -swp_entry_t get_swap_for_hibernation(int type); -void swap_free_for_hibernation(swp_entry_t val); -#endif - /* linux/mm/thrash.c */ extern struct mm_struct *swap_token_mm; extern void grab_swap_token(struct mm_struct *); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 7f43ccdc1d38..eaaea37b3b75 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone, return x; } +/* + * More accurate version that also considers the currently pending + * deltas. For that we need to loop over all cpus to find the current + * deltas. There is no synchronization so the result cannot be + * exactly accurate either. 
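The snapshot reader that follows addresses classic per-cpu counter drift: fast-path updates land in per-cpu deltas and are folded into the global atomic only periodically, so a plain read can be stale by roughly num_online_cpus() times the fold threshold. The general pattern, reduced to a toy counter (not the mm implementation):

    struct demo_counter {
        atomic_long_t global;     /* folded value */
        s8 __percpu *delta;       /* per-cpu pending updates */
    };

    static long demo_counter_snapshot(struct demo_counter *c)
    {
        long x = atomic_long_read(&c->global);
        int cpu;

        for_each_online_cpu(cpu)
            x += *per_cpu_ptr(c->delta, cpu);

        return x < 0 ? 0 : x;     /* unsynchronized sum may dip negative */
    }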
+ */ +static inline unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); + +#ifdef CONFIG_SMP + int cpu; + for_each_online_cpu(cpu) + x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; + + if (x < 0) + x = 0; +#endif + return x; +} + extern unsigned long global_reclaimable_pages(void); extern unsigned long zone_reclaimable_pages(struct zone *zone); diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 726cc3536409..ef6c24a529e1 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -27,11 +27,17 @@ struct cgroup_cls_state #ifdef CONFIG_NET_CLS_CGROUP static inline u32 task_cls_classid(struct task_struct *p) { + int classid; + if (in_interrupt()) return 0; - return container_of(task_subsys_state(p, net_cls_subsys_id), - struct cgroup_cls_state, css)->classid; + rcu_read_lock(); + classid = container_of(task_subsys_state(p, net_cls_subsys_id), + struct cgroup_cls_state, css)->classid; + rcu_read_unlock(); + + return classid; } #else extern int net_cls_subsys_id; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a4747a0f7303..f976885f686f 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) return csum_partial(diff, sizeof(diff), oldsum); } +extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, + int outin); + #endif /* __KERNEL__ */ #endif /* _NET_IP_VS_H */ diff --git a/include/net/sock.h b/include/net/sock.h index ac53bfbdfe16..adab9dc58183 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -752,6 +752,7 @@ struct proto { /* Keeping track of sk's, looking them up, and port selection methods. */ void (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); + void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); /* Keeping track of sockets in use */ diff --git a/include/net/udp.h b/include/net/udp.h index 7abdf305da50..a184d3496b13 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk) } extern void udp_lib_unhash(struct sock *sk); +extern void udp_lib_rehash(struct sock *sk, u16 new_hash); static inline void udp_lib_close(struct sock *sk, long timeout) { diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 192f88c5b0f9..c9483d8f6140 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1791,19 +1791,20 @@ out: } /** - * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup + * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' + * @from: attach to all cgroups of a given task * @tsk: the task to be attached */ -int cgroup_attach_task_current_cg(struct task_struct *tsk) +int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) { struct cgroupfs_root *root; - struct cgroup *cur_cg; int retval = 0; cgroup_lock(); for_each_active_root(root) { - cur_cg = task_cgroup_from_root(current, root); - retval = cgroup_attach_task(cur_cg, tsk); + struct cgroup *from_cg = task_cgroup_from_root(from, root); + + retval = cgroup_attach_task(from_cg, tsk); if (retval) break; } @@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk) return retval; } -EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); +EXPORT_SYMBOL_GPL(cgroup_attach_task_all); /* * Attach task with pid 'pid' to cgroup 'cgrp'. 
Call with cgroup_mutex diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index 75bd9b3ebbb7..20059ef4459a 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c @@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv) int i, bpno; kdb_bp_t *bp, *bp_check; int diag; - int free; char *symname = NULL; long offset = 0ul; int nextarg; @@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv) /* * Find an empty bp structure to allocate */ - free = KDB_MAXBPT; for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { if (bp->bp_free) break; diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index ef3c3f88a7a3..f83972b16564 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -33,10 +33,11 @@ * @children: child nodes * @all: list head for list of all nodes * @parent: parent node - * @info: associated profiling data structure if not a directory - * @ghost: when an object file containing profiling data is unloaded we keep a - * copy of the profiling data here to allow collecting coverage data - * for cleanup code. Such a node is called a "ghost". + * @loaded_info: array of pointers to profiling data sets for loaded object + * files. + * @num_loaded: number of profiling data sets for loaded object files. + * @unloaded_info: accumulated copy of profiling data sets for unloaded + * object files. Used only when gcov_persist=1. * @dentry: main debugfs entry, either a directory or data file * @links: associated symbolic links * @name: data file basename @@ -51,10 +52,11 @@ struct gcov_node { struct list_head children; struct list_head all; struct gcov_node *parent; - struct gcov_info *info; - struct gcov_info *ghost; + struct gcov_info **loaded_info; + struct gcov_info *unloaded_info; struct dentry *dentry; struct dentry **links; + int num_loaded; char name[0]; }; @@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = { }; /* - * Return the profiling data set for a given node. This can either be the - * original profiling data structure or a duplicate (also called "ghost") - * in case the associated object file has been unloaded. + * Return a profiling data set associated with the given node. This is + * either a data set for a loaded object file or a data set copy in case + * all associated object files have been unloaded. */ static struct gcov_info *get_node_info(struct gcov_node *node) { - if (node->info) - return node->info; + if (node->num_loaded > 0) + return node->loaded_info[0]; - return node->ghost; + return node->unloaded_info; +} + +/* + * Return a newly allocated profiling data set which contains the sum of + * all profiling data associated with the given node. + */ +static struct gcov_info *get_accumulated_info(struct gcov_node *node) +{ + struct gcov_info *info; + int i = 0; + + if (node->unloaded_info) + info = gcov_info_dup(node->unloaded_info); + else + info = gcov_info_dup(node->loaded_info[i++]); + if (!info) + return NULL; + for (; i < node->num_loaded; i++) + gcov_info_add(info, node->loaded_info[i]); + + return info; } /* @@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file) mutex_lock(&node_lock); /* * Read from a profiling data copy to minimize reference tracking - * complexity and concurrent access. + * complexity and concurrent access and to keep accumulating multiple + * profiling data sets associated with one node simple. 
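get_accumulated_info(), introduced above and used just below, sums every loaded instance of an object file's profiling data into one freshly duplicated set, so a read reports totals even when the same module is loaded more than once. The accumulation itself is just dup-then-add over the in-kernel gcov helpers:

    /* Sketch; gcov_info_dup()/gcov_info_add() are the helpers used above. */
    static struct gcov_info *demo_sum_sets(struct gcov_info **sets, int n)
    {
        struct gcov_info *sum = gcov_info_dup(sets[0]);
        int i;

        if (!sum)
            return NULL;
        for (i = 1; i < n; i++)
            gcov_info_add(sum, sets[i]);  /* add counters element-wise */
        return sum;
    }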
*/ - info = gcov_info_dup(get_node_info(node)); + info = get_accumulated_info(node); if (!info) goto out_unlock; iter = gcov_iter_new(info); @@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name) return NULL; } +/* + * Reset all profiling data associated with the specified node. + */ +static void reset_node(struct gcov_node *node) +{ + int i; + + if (node->unloaded_info) + gcov_info_reset(node->unloaded_info); + for (i = 0; i < node->num_loaded; i++) + gcov_info_reset(node->loaded_info[i]); +} + static void remove_node(struct gcov_node *node); /* * write() implementation for gcov data files. Reset profiling data for the - * associated file. If the object file has been unloaded (i.e. this is - * a "ghost" node), remove the debug fs node as well. + * corresponding file. If all associated object files have been unloaded, + * remove the debug fs node as well. */ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, size_t len, loff_t *pos) @@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, node = get_node_by_name(info->filename); if (node) { /* Reset counts or remove node for unloaded modules. */ - if (node->ghost) + if (node->num_loaded == 0) remove_node(node); else - gcov_info_reset(node->info); + reset_node(node); } /* Reset counts for open file. */ gcov_info_reset(info); @@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info, INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); INIT_LIST_HEAD(&node->all); - node->info = info; + if (node->loaded_info) { + node->loaded_info[0] = info; + node->num_loaded = 1; + } node->parent = parent; if (name) strcpy(node->name, name); @@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent, struct gcov_node *node; node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); - if (!node) { - pr_warning("out of memory\n"); - return NULL; + if (!node) + goto err_nomem; + if (info) { + node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), + GFP_KERNEL); + if (!node->loaded_info) + goto err_nomem; } init_node(node, info, name, parent); /* Differentiate between gcov data file nodes and directory nodes. */ @@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent, list_add(&node->all, &all_head); return node; + +err_nomem: + kfree(node); + pr_warning("out of memory\n"); + return NULL; } /* Remove symbolic links associated with node. */ @@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node) list_del(&node->all); debugfs_remove(node->dentry); remove_links(node); - if (node->ghost) - gcov_info_free(node->ghost); + kfree(node->loaded_info); + if (node->unloaded_info) + gcov_info_free(node->unloaded_info); kfree(node); } @@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent, /* * write() implementation for reset file. Reset all profiling data to zero - * and remove ghost nodes. + * and remove nodes for which all associated object files are unloaded. */ static ssize_t reset_write(struct file *file, const char __user *addr, size_t len, loff_t *pos) @@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr, mutex_lock(&node_lock); restart: list_for_each_entry(node, &all_head, all) { - if (node->info) - gcov_info_reset(node->info); + if (node->num_loaded > 0) + reset_node(node); else if (list_empty(&node->children)) { remove_node(node); /* Several nodes may have gone - restart loop. 
*/ @@ -564,37 +614,115 @@ err_remove: } /* - * The profiling data set associated with this node is being unloaded. Store a - * copy of the profiling data and turn this node into a "ghost". + * Associate a profiling data set with an existing node. Needs to be called + * with node_lock held. */ -static int ghost_node(struct gcov_node *node) +static void add_info(struct gcov_node *node, struct gcov_info *info) { - node->ghost = gcov_info_dup(node->info); - if (!node->ghost) { - pr_warning("could not save data for '%s' (out of memory)\n", - node->info->filename); - return -ENOMEM; + struct gcov_info **loaded_info; + int num = node->num_loaded; + + /* + * Prepare new array. This is done first to simplify cleanup in + * case the new data set is incompatible, the node only contains + * unloaded data sets and there's not enough memory for the array. + */ + loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); + if (!loaded_info) { + pr_warning("could not add '%s' (out of memory)\n", + info->filename); + return; + } + memcpy(loaded_info, node->loaded_info, + num * sizeof(struct gcov_info *)); + loaded_info[num] = info; + /* Check if the new data set is compatible. */ + if (num == 0) { + /* + * A module was unloaded, modified and reloaded. The new + * data set replaces the copy of the last one. + */ + if (!gcov_info_is_compatible(node->unloaded_info, info)) { + pr_warning("discarding saved data for %s " + "(incompatible version)\n", info->filename); + gcov_info_free(node->unloaded_info); + node->unloaded_info = NULL; + } + } else { + /* + * Two different versions of the same object file are loaded. + * The initial one takes precedence. + */ + if (!gcov_info_is_compatible(node->loaded_info[0], info)) { + pr_warning("could not add '%s' (incompatible " + "version)\n", info->filename); + kfree(loaded_info); + return; + } } - node->info = NULL; + /* Overwrite previous array. */ + kfree(node->loaded_info); + node->loaded_info = loaded_info; + node->num_loaded = num + 1; +} - return 0; +/* + * Return the index of a profiling data set associated with a node. + */ +static int get_info_index(struct gcov_node *node, struct gcov_info *info) +{ + int i; + + for (i = 0; i < node->num_loaded; i++) { + if (node->loaded_info[i] == info) + return i; + } + return -ENOENT; } /* - * Profiling data for this node has been loaded again. Add profiling data - * from previous instantiation and turn this node into a regular node. + * Save the data of a profiling data set which is being unloaded. */ -static void revive_node(struct gcov_node *node, struct gcov_info *info) +static void save_info(struct gcov_node *node, struct gcov_info *info) { - if (gcov_info_is_compatible(node->ghost, info)) - gcov_info_add(info, node->ghost); + if (node->unloaded_info) + gcov_info_add(node->unloaded_info, info); else { - pr_warning("discarding saved data for '%s' (version changed)\n", + node->unloaded_info = gcov_info_dup(info); + if (!node->unloaded_info) { + pr_warning("could not save data for '%s' " + "(out of memory)\n", info->filename); + } + } +} + +/* + * Disassociate a profiling data set from a node. Needs to be called with + * node_lock held. + */ +static void remove_info(struct gcov_node *node, struct gcov_info *info) +{ + int i; + + i = get_info_index(node, info); + if (i < 0) { + pr_warning("could not remove '%s' (not found)\n", info->filename); + return; } - gcov_info_free(node->ghost); - node->ghost = NULL; - node->info = info; + if (gcov_persist) + save_info(node, info); + /* Shrink array. 
*/ + node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; + node->num_loaded--; + if (node->num_loaded > 0) + return; + /* Last loaded data set was removed. */ + kfree(node->loaded_info); + node->loaded_info = NULL; + node->num_loaded = 0; + if (!node->unloaded_info) + remove_node(node); } /* @@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info) node = get_node_by_name(info->filename); switch (action) { case GCOV_ADD: - /* Add new node or revive ghost. */ - if (!node) { + if (node) + add_info(node, info); + else add_node(info); - break; - } - if (gcov_persist) - revive_node(node, info); - else { - pr_warning("could not add '%s' (already exists)\n", - info->filename); - } break; case GCOV_REMOVE: - /* Remove node or turn into ghost. */ - if (!node) { + if (node) + remove_info(node, info); + else { pr_warning("could not remove '%s' (not found)\n", info->filename); - break; } - if (gcov_persist) { - if (!ghost_node(node)) - break; - } - remove_node(node); break; } mutex_unlock(&node_lock); diff --git a/kernel/groups.c b/kernel/groups.c index 53b1916c9492..253dc0f35cf4 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp) right = group_info->ngroups; while (left < right) { unsigned int mid = (left+right)/2; - int cmp = grp - GROUP_AT(group_info, mid); - if (cmp > 0) + if (grp > GROUP_AT(group_info, mid)) left = mid + 1; - else if (cmp < 0) + else if (grp < GROUP_AT(group_info, mid)) right = mid; else return 1; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index ce669174f355..1decafbb6b1a 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel); */ ktime_t hrtimer_get_remaining(const struct hrtimer *timer) { - struct hrtimer_clock_base *base; unsigned long flags; ktime_t rem; - base = lock_hrtimer_base(timer, &flags); + lock_hrtimer_base(timer, &flags); rem = hrtimer_expires_remaining(timer); unlock_hrtimer_base(timer, &flags); diff --git a/kernel/mutex.c b/kernel/mutex.c index 4c0b7b3e6d2e..200407c1502f 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -36,15 +36,6 @@ # include <asm/mutex.h> #endif -/*** - * mutex_init - initialize the mutex - * @lock: the mutex to be initialized - * @key: the lock_class_key for the class; used by mutex lock debugging - * - * Initialize the mutex to unlocked state. - * - * It is not allowed to initialize an already locked mutex. 
- */ void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) { @@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init); static __used noinline void __sched __mutex_lock_slowpath(atomic_t *lock_count); -/*** +/** * mutex_lock - acquire the mutex * @lock: the mutex to be acquired * @@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock); static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); -/*** +/** * mutex_unlock - release the mutex * @lock: the mutex to be released * @@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count); static noinline int __sched __mutex_lock_interruptible_slowpath(atomic_t *lock_count); -/*** - * mutex_lock_interruptible - acquire the mutex, interruptable +/** + * mutex_lock_interruptible - acquire the mutex, interruptible * @lock: the mutex to be acquired * * Lock the mutex like mutex_lock(), and return 0 if the mutex has @@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) return prev == 1; } -/*** - * mutex_trylock - try acquire the mutex, without waiting +/** + * mutex_trylock - try to acquire the mutex, without waiting * @lock: the mutex to be acquired * * Try to acquire the mutex atomically. Returns 1 if the mutex * has been acquired successfully, and 0 on contention. * * NOTE: this function follows the spin_trylock() convention, so - * it is negated to the down_trylock() return values! Be careful + * it is negated from the down_trylock() return values! Be careful * about this when converting semaphore users to mutexes. * * This function must not be used in interrupt context. The diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 403d1804b198..db5b56064687 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event) } } +static inline int +event_filter_match(struct perf_event *event) +{ + return event->cpu == -1 || event->cpu == smp_processor_id(); +} + static void event_sched_out(struct perf_event *event, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { + u64 delta; + /* + * An event which could not be activated because of + * filter mismatch still needs to have its timings + * maintained, otherwise bogus information is returned + * via read() for time_enabled, time_running: + */ + if (event->state == PERF_EVENT_STATE_INACTIVE + && !event_filter_match(event)) { + delta = ctx->time - event->tstamp_stopped; + event->tstamp_running += delta; + event->tstamp_stopped = ctx->time; + } + if (event->state != PERF_EVENT_STATE_ACTIVE) return; @@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx) { struct perf_event *event; - - if (group_event->state != PERF_EVENT_STATE_ACTIVE) - return; + int state = group_event->state; event_sched_out(group_event, cpuctx, ctx); @@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event, list_for_each_entry(event, &group_event->sibling_list, group_entry) event_sched_out(event, cpuctx, ctx); - if (group_event->attr.exclusive) + if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) cpuctx->exclusive = 0; } @@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: + case CPU_DOWN_FAILED: perf_event_init_cpu(cpu); break; + case CPU_UP_CANCELED: case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN:
perf_event_exit_cpu(cpu); break; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index c77963938bca..8dc31e02ae12 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode) goto Close; suspend_console(); - hibernation_freeze_swap(); saved_mask = clear_gfp_allowed_mask(GFP_IOFS); error = dpm_suspend_start(PMSG_FREEZE); if (error) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 5209b39e6982..d3f795f01bbc 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1086,7 +1086,6 @@ void swsusp_free(void) buffer = NULL; alloc_normal = 0; alloc_highmem = 0; - hibernation_thaw_swap(); } /* Helper functions used for the shrinking of memory. */ diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 5d0059eed3e4..e6a5bdf61a37 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap) { unsigned long offset; - offset = swp_offset(get_swap_for_hibernation(swap)); + offset = swp_offset(get_swap_page_of_type(swap)); if (offset) { if (swsusp_extents_insert(offset)) - swap_free_for_hibernation(swp_entry(swap, offset)); + swap_free(swp_entry(swap, offset)); else return swapdev_block(swap, offset); } @@ -163,7 +163,7 @@ void free_all_swap_pages(int swap) ext = container_of(node, struct swsusp_extent, node); rb_erase(node, &swsusp_extents); for (offset = ext->start; offset <= ext->end; offset++) - swap_free_for_hibernation(swp_entry(swap, offset)); + swap_free(swp_entry(swap, offset)); kfree(ext); } diff --git a/kernel/sched.c b/kernel/sched.c index 09b574e7f4df..ed09d4f2a69c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p) static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } + +static void sched_avg_update(struct rq *rq) +{ +} #endif /* CONFIG_SMP */ #if BITS_PER_LONG == 32 @@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq) this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } + + sched_avg_update(this_rq); } static void update_cpu_load_active(struct rq *this_rq) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index ab661ebc4895..9b5b4f86b742 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1313,7 +1313,7 @@ static struct sched_group * find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu, int load_idx) { - struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; + struct sched_group *idlest = NULL, *group = sd->groups; unsigned long min_load = ULONG_MAX, this_load = 0; int imbalance = 100 + (sd->imbalance_pct-100)/2; @@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, if (local_group) { this_load = avg_load; - this = group; } else if (avg_load < min_load) { min_load = avg_load; idlest = group; @@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu) struct rq *rq = cpu_rq(cpu); u64 total, available; - sched_avg_update(rq); - total = sched_avg_period() + (rq->clock - rq->age_stamp); available = total - rq->rt_avg; diff --git a/kernel/sys.c b/kernel/sys.c index e9ad44489828..7f5a0cd296a9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) pgid = pid; if (pgid < 0) return -EINVAL; + rcu_read_lock(); /* From this point forward we keep holding onto the tasklist lock * so that our parent does not change from under us. 
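The setpgid() hunk above nests an RCU read-side section outside the tasklist lock, so that the pid-to-task lookups done inside stay valid for the whole critical section. A condensed sketch of the resulting lock order; update_pgid() is a placeholder for the lookups and the pgid assignment, not a real kernel function:

static int set_pgid_locked(pid_t pid, pid_t pgid)
{
        int err;

        rcu_read_lock();                  /* pid -> task lookups stay valid */
        write_lock_irq(&tasklist_lock);   /* parent cannot change under us */
        err = update_pgid(pid, pgid);     /* placeholder for the real work */
        write_unlock_irq(&tasklist_lock);
        rcu_read_unlock();
        return err;
}
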
-DaveM @@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) out: /* All paths lead to here, thus we are safe. -DaveM */ write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); return err; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ca38e8e3e907..f88552c6d227 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1713,10 +1713,7 @@ static __init int sysctl_init(void) { sysctl_set_parent(NULL, root_table); #ifdef CONFIG_SYSCTL_SYSCALL_CHECK - { - int err; - err = sysctl_check_table(current->nsproxy, root_table); - } + sysctl_check_table(current->nsproxy, root_table); #endif return 0; } diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0d88ce9b9fb8..fa7ece649fe1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v) { struct ftrace_profile *rec = v; char str[KSYM_SYMBOL_LEN]; + int ret = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - static DEFINE_MUTEX(mutex); static struct trace_seq s; unsigned long long avg; unsigned long long stddev; #endif + mutex_lock(&ftrace_profile_lock); + + /* we raced with function_profile_reset() */ + if (unlikely(rec->counter == 0)) { + ret = -EBUSY; + goto out; + } kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); seq_printf(m, " %-30.30s %10lu", str, rec->counter); @@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v) do_div(stddev, (rec->counter - 1) * 1000); } - mutex_lock(&mutex); trace_seq_init(&s); trace_print_graph_duration(rec->time, &s); trace_seq_puts(&s, " "); @@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v) trace_seq_puts(&s, " "); trace_print_graph_duration(stddev, &s); trace_print_seq(m, &s); - mutex_unlock(&mutex); #endif seq_putc(m, '\n'); +out: + mutex_unlock(&ftrace_profile_lock); - return 0; + return ret; } static void ftrace_profile_reset(struct ftrace_profile_stat *stat) @@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; + /* reset in case of seek/pread */ + iter->flags &= ~FTRACE_ITER_HASH; return iter; } @@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_regex_lseek, + .llseek = no_llseek, .release = ftrace_filter_release, }; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 19cccc3c3028..492197e2f86c 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) static void rb_advance_iter(struct ring_buffer_iter *iter) { - struct ring_buffer *buffer; struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; unsigned length; cpu_buffer = iter->cpu_buffer; - buffer = cpu_buffer->buffer; /* * Check if we are at the end of the buffer. 
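The function_stat_show() rework above is easy to lose in the hunk noise: the whole body now runs under ftrace_profile_lock, and a counter of zero is the tell-tale that ftrace_profile_reset() won the race. A simplified sketch of that structure, with the graph-tracer statistics and symbol formatting elided:

static int stat_show(struct seq_file *m, struct ftrace_profile *rec)
{
        int ret = 0;

        mutex_lock(&ftrace_profile_lock);
        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }
        seq_printf(m, "%pS %10lu\n", (void *)rec->ip, rec->counter);
out:
        mutex_unlock(&ftrace_profile_lock);
        return ret;
}
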
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 000e6e85b445..31cc4cb0dbf2 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event) tp_event->class && tp_event->class->reg && try_module_get(tp_event->mod)) { ret = perf_trace_event_init(tp_event, p_event); + if (ret) + module_put(tp_event->mod); break; } } @@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event) } } out: + module_put(tp_event->mod); mutex_unlock(&event_mutex); } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8b27c9849b42..544301d29dee 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs); -/* Check the name is good for event/group */ -static int check_event_name(const char *name) +/* Check the name is good for event/group/fields */ +static int is_good_name(const char *name) { if (!isalpha(*name) && *name != '_') return 0; @@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, else tp->rp.kp.pre_handler = kprobe_dispatcher; - if (!event || !check_event_name(event)) { + if (!event || !is_good_name(event)) { ret = -EINVAL; goto error; } @@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, if (!tp->call.name) goto error; - if (!group || !check_event_name(group)) { + if (!group || !is_good_name(group)) { ret = -EINVAL; goto error; } @@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv) int i, ret = 0; int is_return = 0, is_delete = 0; char *symbol = NULL, *event = NULL, *group = NULL; - char *arg, *tmp; + char *arg; unsigned long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; @@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv) /* parse arguments */ ret = 0; for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { + /* Increment count for freeing args in error case */ + tp->nr_args++; + /* Parse argument name */ arg = strchr(argv[i], '='); - if (arg) + if (arg) { *arg++ = '\0'; - else + tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); + } else { arg = argv[i]; + /* If argument name is omitted, set "argN" */ + snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1); + tp->args[i].name = kstrdup(buf, GFP_KERNEL); + } - tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); if (!tp->args[i].name) { - pr_info("Failed to allocate argument%d name '%s'.\n", - i, argv[i]); + pr_info("Failed to allocate argument[%d] name.\n", i); ret = -ENOMEM; goto error; } - tmp = strchr(tp->args[i].name, ':'); - if (tmp) - *tmp = '_'; /* convert : to _ */ + + if (!is_good_name(tp->args[i].name)) { + pr_info("Invalid argument[%d] name: %s\n", + i, tp->args[i].name); + ret = -EINVAL; + goto error; + } if (conflict_field_name(tp->args[i].name, tp->args, i)) { - pr_info("Argument%d name '%s' conflicts with " + pr_info("Argument[%d] name '%s' conflicts with " "another field.\n", i, argv[i]); ret = -EINVAL; goto error; @@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv) /* Parse fetch argument */ ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); if (ret) { - pr_info("Parse error at argument%d. (%d)\n", i, ret); - kfree(tp->args[i].name); + pr_info("Parse error at argument[%d]. 
(%d)\n", i, ret); goto error; } - - tp->nr_args++; } ret = register_trace_probe(tp); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b1..7f9c3c52ecc1 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -122,7 +122,7 @@ static void __touch_watchdog(void) void touch_softlockup_watchdog(void) { - __get_cpu_var(watchdog_touch_ts) = 0; + __raw_get_cpu_var(watchdog_touch_ts) = 0; } EXPORT_SYMBOL(touch_softlockup_watchdog); @@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void) #ifdef CONFIG_HARDLOCKUP_DETECTOR void touch_nmi_watchdog(void) { - __get_cpu_var(watchdog_nmi_touch) = true; + if (watchdog_enabled) { + unsigned cpu; + + for_each_present_cpu(cpu) { + if (per_cpu(watchdog_nmi_touch, cpu) != true) + per_cpu(watchdog_nmi_touch, cpu) = true; + } + } touch_softlockup_watchdog(); } EXPORT_SYMBOL(touch_nmi_watchdog); @@ -433,6 +440,9 @@ static int watchdog_enable(int cpu) wake_up_process(p); } + /* if any cpu succeeds, watchdog is considered enabled for the system */ + watchdog_enabled = 1; + return 0; } @@ -455,9 +465,6 @@ static void watchdog_disable(int cpu) per_cpu(softlockup_watchdog, cpu) = NULL; kthread_stop(p); } - - /* if any cpu succeeds, watchdog is considered enabled for the system */ - watchdog_enabled = 1; } static void watchdog_enable_all_cpus(void) diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a5ec42868f99..4ceb05d772ae 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, left -= sg_size; sg = alloc_fn(alloc_size, gfp_mask); - if (unlikely(!sg)) - return -ENOMEM; + if (unlikely(!sg)) { + /* + * Adjust entry count to reflect that the last + * entry of the previous table won't be used for + * linkage. Without this, sg_kfree() may get + * confused. + */ + if (prv) + table->nents = ++table->orig_nents; + + return -ENOMEM; + } sg_init_table(sg, alloc_size); table->nents = table->orig_nents += sg_size; diff --git a/mm/Kconfig b/mm/Kconfig index f4e516e9c37c..f0fb9124e410 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -189,7 +189,7 @@ config COMPACTION config MIGRATION bool "Page migration" def_bool y - depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE + depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION help Allows the migration of the physical location of pages of processes while the virtual addresses are not changed. This is useful in diff --git a/mm/backing-dev.c b/mm/backing-dev.c index eaa4a5bbe063..c2bf86f470ed 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -445,8 +445,8 @@ static int bdi_forker_thread(void *ptr) switch (action) { case FORK_THREAD: __set_current_state(TASK_RUNNING); - task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", - dev_name(bdi->dev)); + task = kthread_create(bdi_writeback_thread, &bdi->wb, + "flush-%s", dev_name(bdi->dev)); if (IS_ERR(task)) { /* * If thread creation fails, force writeout of @@ -457,10 +457,13 @@ static int bdi_forker_thread(void *ptr) /* * The spinlock makes sure we do not lose * wake-ups when racing with 'bdi_queue_work()'. + * And as soon as the bdi thread is visible, we + * can start it. 
*/ spin_lock_bh(&bdi->wb_lock); bdi->wb.task = task; spin_unlock_bh(&bdi->wb_lock); + wake_up_process(task); } break; diff --git a/mm/bounce.c b/mm/bounce.c index 13b6dad1eed2..1481de68184b 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from) */ vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; - flush_dcache_page(tovec->bv_page); bounce_copy_vec(tovec, vfrom); + flush_dcache_page(tovec->bv_page); } } diff --git a/mm/compaction.c b/mm/compaction.c index 94cce51b0b35..4d709ee59013 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) /* Similar to reclaim, but different enough that they don't share logic */ static bool too_many_isolated(struct zone *zone) { - - unsigned long inactive, isolated; + unsigned long active, inactive, isolated; inactive = zone_page_state(zone, NR_INACTIVE_FILE) + zone_page_state(zone, NR_INACTIVE_ANON); + active = zone_page_state(zone, NR_ACTIVE_FILE) + + zone_page_state(zone, NR_ACTIVE_ANON); isolated = zone_page_state(zone, NR_ISOLATED_FILE) + zone_page_state(zone, NR_ISOLATED_ANON); - return isolated > inactive; + return isolated > (inactive + active) / 2; } /* @@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page, { struct page *new_page; - unlock_page(page); /* any racers will COW it, not modify it */ - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); if (new_page) { copy_user_highpage(new_page, page, address, vma); @@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page, add_page_to_unevictable_list(new_page); } - page_cache_release(page); return new_page; } diff --git a/mm/memory.c b/mm/memory.c index 6b2ab1051851..71b161b73bb5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned int flags, pte_t orig_pte) { spinlock_t *ptl; - struct page *page; + struct page *page, *swapcache = NULL; swp_entry_t entry; pte_t pte; struct mem_cgroup *ptr = NULL; @@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - page = ksm_might_need_to_copy(page, vma, address); - if (!page) { - ret = VM_FAULT_OOM; - goto out; + /* + * Make sure try_to_free_swap didn't release the swapcache + * from under us. The page pin isn't enough to prevent that. + */ + if (unlikely(!PageSwapCache(page))) + goto out_page; + + if (ksm_might_need_to_copy(page, vma, address)) { + swapcache = page; + page = ksm_does_need_to_copy(page, vma, address); + + if (unlikely(!page)) { + ret = VM_FAULT_OOM; + page = swapcache; + swapcache = NULL; + goto out_page; + } } if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { @@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); unlock_page(page); + if (swapcache) { + /* + * Hold the lock to avoid the swap entry to be reused + * until we take the PT lock for the pte_same() check + * (to avoid false positives from pte_same). For + * further safety release the lock after the swap_free + * so that the swap count won't change under a + * parallel locked swapcache. 
+ */ + unlock_page(swapcache); + page_cache_release(swapcache); + } if (flags & FAULT_FLAG_WRITE) { ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); @@ -2756,6 +2781,10 @@ out_page: unlock_page(page); out_release: page_cache_release(page); + if (swapcache) { + unlock_page(swapcache); + page_cache_release(swapcache); + } return ret; } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a4cfcdc00455..dd186c1a5d53 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page) /* Return the start of the next active pageblock after a given page */ static struct page *next_active_pageblock(struct page *page) { - int pageblocks_stride; - /* Ensure the starting page is pageblock-aligned */ BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); - /* Move forward by at least 1 * pageblock_nr_pages */ - pageblocks_stride = 1; - /* If the entire pageblock is free, move to the end of free page */ - if (pageblock_free(page)) - pageblocks_stride += page_order(page) - pageblock_order; + if (pageblock_free(page)) { + int order; + /* be careful. we don't have locks, page_order can be changed.*/ + order = page_order(page); + if ((order < MAX_ORDER) && (order >= pageblock_order)) + return page + (1 << order); + } - return page + (pageblocks_stride * pageblock_nr_pages); + return page + pageblock_nr_pages; } /* Checks if this range of memory is likely to be hot-removable. */ diff --git a/mm/mlock.c b/mm/mlock.c index cbae7c5b9568..b70919ce4f72 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page) } } -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) { return (vma->vm_flags & VM_GROWSDOWN) && diff --git a/mm/mmzone.c b/mm/mmzone.c index f5b7d1760213..e35bfb82c855 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn, return 1; } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ + +#ifdef CONFIG_SMP +/* Called when a more accurate view of NR_FREE_PAGES is needed */ +unsigned long zone_nr_free_pages(struct zone *zone) +{ + unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES); + + /* + * While kswapd is awake, it is considered the zone is under some + * memory pressure. Under pressure, there is a risk that + * per-cpu-counter-drift will allow the min watermark to be breached + * potentially causing a live-lock. 
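The guard being described reduces to a cheap-versus-exact choice: only when the racy NR_FREE_PAGES reading has already dropped below percpu_drift_mark is the full per-CPU snapshot worth its cost. A condensed sketch of zone_nr_free_pages(), with the kswapd-awake test elided:

static unsigned long zone_nr_free_pages_sketch(struct zone *zone)
{
        unsigned long nr = zone_page_state(zone, NR_FREE_PAGES);

        /* near the watermark, per-CPU drift can hide a breach */
        if (nr < zone->percpu_drift_mark)
                return zone_page_state_snapshot(zone, NR_FREE_PAGES);
        return nr;
}
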
While kswapd is awake and + * free pages are low, get a better estimate for free pages + */ + if (nr_free_pages < zone->percpu_drift_mark && + !waitqueue_active(&zone->zone_pgdat->kswapd_wait)) + return zone_page_state_snapshot(zone, NR_FREE_PAGES); + + return nr_free_pages; +} +#endif /* CONFIG_SMP */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a9649f4b261e..a8cfa9cc6e86 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count, { int migratetype = 0; int batch_free = 0; + int to_free = count; spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; - __mod_zone_page_state(zone, NR_FREE_PAGES, count); - while (count) { + while (to_free) { struct page *page; struct list_head *list; @@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ __free_one_page(page, zone, 0, page_private(page)); trace_mm_page_pcpu_drain(page, 0, page_private(page)); - } while (--count && --batch_free && !list_empty(list)); + } while (--to_free && --batch_free && !list_empty(list)); } + __mod_zone_page_state(zone, NR_FREE_PAGES, count); spin_unlock(&zone->lock); } @@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, zone->all_unreclaimable = 0; zone->pages_scanned = 0; - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); __free_one_page(page, zone, order, migratetype); + __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); spin_unlock(&zone->lock); } @@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, { /* free_pages my go negative - that's OK */ long min = mark; - long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; + long free_pages = zone_nr_free_pages(z) - (1 << order) + 1; int o; if (alloc_flags & ALLOC_HIGH) @@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; struct reclaim_state reclaim_state; struct task_struct *p = current; + bool drained = false; cond_resched(); @@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, cond_resched(); - if (order != 0) - drain_all_pages(); + if (unlikely(!(*did_some_progress))) + return NULL; - if (likely(*did_some_progress)) - page = get_page_from_freelist(gfp_mask, nodemask, order, +retry: + page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags, preferred_zone, migratetype); + + /* + * If an allocation failed after direct reclaim, it could be because + * pages are pinned on the per-cpu lists. Drain them and try again + */ + if (!page && !drained) { + drain_all_pages(); + drained = true; + goto retry; + } + return page; } @@ -2423,7 +2436,7 @@ void show_free_areas(void) " all_unreclaimable? 
%s" "\n", zone->name, - K(zone_page_state(zone, NR_FREE_PAGES)), + K(zone_nr_free_pages(zone)), K(min_wmark_pages(zone)), K(low_wmark_pages(zone)), K(high_wmark_pages(zone)), diff --git a/mm/swapfile.c b/mm/swapfile.c index 1f3f9c59a73a..7c703ff2f36f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -47,8 +47,6 @@ long nr_swap_pages; long total_swap_pages; static int least_priority; -static bool swap_for_hibernation; - static const char Bad_file[] = "Bad swap file entry "; static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; @@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si) nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); if (nr_blocks) { err = blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_KERNEL, - BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); + nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); if (err) return err; cond_resched(); @@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si) nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_KERNEL, - BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); + nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); if (err) break; @@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | - BLKDEV_IFL_BARRIER)) + nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) break; } @@ -320,10 +315,8 @@ checks: if (offset > si->highest_bit) scan_base = offset = si->lowest_bit; - /* reuse swap entry of cache-only swap if not hibernation. */ - if (vm_swap_full() - && usage == SWAP_HAS_CACHE - && si->swap_map[offset] == SWAP_HAS_CACHE) { + /* reuse swap entry of cache-only swap if not busy. */ + if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { int swap_was_freed; spin_unlock(&swap_lock); swap_was_freed = __try_to_reclaim_swap(si, offset); @@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void) spin_lock(&swap_lock); if (nr_swap_pages <= 0) goto noswap; - if (swap_for_hibernation) - goto noswap; nr_swap_pages--; for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { @@ -487,6 +478,28 @@ noswap: return (swp_entry_t) {0}; } +/* The only caller of this function is now the suspend routine */ +swp_entry_t get_swap_page_of_type(int type) +{ + struct swap_info_struct *si; + pgoff_t offset; + + spin_lock(&swap_lock); + si = swap_info[type]; + if (si && (si->flags & SWP_WRITEOK)) { + nr_swap_pages--; + /* This is called for allocating swap entry, not cache */ + offset = scan_swap_map(si, 1); + if (offset) { + spin_unlock(&swap_lock); + return swp_entry(type, offset); + } + nr_swap_pages++; + } + spin_unlock(&swap_lock); + return (swp_entry_t) {0}; +} + static struct swap_info_struct *swap_info_get(swp_entry_t entry) { struct swap_info_struct *p; @@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page) if (page_swapcount(page)) return 0; + /* + * Once hibernation has begun to create its image of memory, + * there's a danger that one of the calls to try_to_free_swap() + * - most probably a call from __try_to_reclaim_swap() while + * hibernation is allocating its own swap pages for the image, + * but conceivably even a call from memory reclaim - will free + * the swap from a page which has already been recorded in the + * image as a clean swapcache page, and then reuse its swap for + * another page of the image. 
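The new get_swap_page_of_type() above replaces the bespoke hibernation allocator that is deleted further down. Its single consumer is the kernel/power/swap.c hunk earlier in this series, which pairs it with plain swap_free(); condensed here for reference:

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset = swp_offset(get_swap_page_of_type(swap));

        if (offset) {
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset)); /* already tracked */
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}
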
On waking from hibernation, the + * original page might be freed under memory pressure, then + * later read back in from swap, now with the wrong data. + * + * Hibernation clears bits from gfp_allowed_mask to prevent + * memory reclaim from writing to disk, so check that here. + */ + if (!(gfp_allowed_mask & __GFP_IO)) + return 0; + delete_from_swap_cache(page); SetPageDirty(page); return 1; @@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) #endif #ifdef CONFIG_HIBERNATION - -static pgoff_t hibernation_offset[MAX_SWAPFILES]; -/* - * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise, - * saved swap_map[] image to the disk will be an incomplete because it's - * changing without synchronization with hibernation snap shot. - * At resume, we just make swap_for_hibernation=false. We can forget - * used maps easily. - */ -void hibernation_freeze_swap(void) -{ - int i; - - spin_lock(&swap_lock); - - printk(KERN_INFO "PM: Freeze Swap\n"); - swap_for_hibernation = true; - for (i = 0; i < MAX_SWAPFILES; i++) - hibernation_offset[i] = 1; - spin_unlock(&swap_lock); -} - -void hibernation_thaw_swap(void) -{ - spin_lock(&swap_lock); - if (swap_for_hibernation) { - printk(KERN_INFO "PM: Thaw Swap\n"); - swap_for_hibernation = false; - } - spin_unlock(&swap_lock); -} - -/* - * Because updateing swap_map[] can make not-saved-status-change, - * we use our own easy allocator. - * Please see kernel/power/swap.c, Used swaps are recorded into - * RB-tree. - */ -swp_entry_t get_swap_for_hibernation(int type) -{ - pgoff_t off; - swp_entry_t val = {0}; - struct swap_info_struct *si; - - spin_lock(&swap_lock); - - si = swap_info[type]; - if (!si || !(si->flags & SWP_WRITEOK)) - goto done; - - for (off = hibernation_offset[type]; off < si->max; ++off) { - if (!si->swap_map[off]) - break; - } - if (off < si->max) { - val = swp_entry(type, off); - hibernation_offset[type] = off + 1; - } -done: - spin_unlock(&swap_lock); - return val; -} - -void swap_free_for_hibernation(swp_entry_t ent) -{ - /* Nothing to do */ -} - /* * Find the swap type that corresponds to given device (if any). 
* @@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->flags |= SWP_SOLIDSTATE; p->cluster_next = 1 + (random32() % p->highest_bit); } - if (discard_swap(p) == 0) + if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD)) p->flags |= SWP_DISCARDABLE; } diff --git a/mm/vmstat.c b/mm/vmstat.c index f389168f9a83..355a9e669aaa 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void) int threshold; for_each_populated_zone(zone) { + unsigned long max_drift, tolerate_drift; + threshold = calculate_threshold(zone); for_each_online_cpu(cpu) per_cpu_ptr(zone->pageset, cpu)->stat_threshold = threshold; + + /* + * Only set percpu_drift_mark if there is a danger that + * NR_FREE_PAGES reports the low watermark is ok when in fact + * the min watermark could be breached by an allocation + */ + tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); + max_drift = num_online_cpus() * threshold; + if (max_drift > tolerate_drift) + zone->percpu_drift_mark = high_wmark_pages(zone) + + max_drift; } } @@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, "\n scanned %lu" "\n spanned %lu" "\n present %lu", - zone_page_state(zone, NR_FREE_PAGES), + zone_nr_free_pages(zone), min_wmark_pages(zone), low_wmark_pages(zone), high_wmark_pages(zone), @@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: + refresh_zone_stat_thresholds(); start_cpu_timer(cpu); node_set_state(cpu_to_node(cpu), N_CPU); break; diff --git a/net/core/dev.c b/net/core/dev.c index 3721fbb9a83c..b9b22a3c4c8f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) { int queue_index; - struct sock *sk = skb->sk; + const struct net_device_ops *ops = dev->netdev_ops; - queue_index = sk_tx_queue_get(sk); - if (queue_index < 0) { - const struct net_device_ops *ops = dev->netdev_ops; + if (ops->ndo_select_queue) { + queue_index = ops->ndo_select_queue(dev, skb); + queue_index = dev_cap_txqueue(dev, queue_index); + } else { + struct sock *sk = skb->sk; + queue_index = sk_tx_queue_get(sk); + if (queue_index < 0) { - if (ops->ndo_select_queue) { - queue_index = ops->ndo_select_queue(dev, skb); - queue_index = dev_cap_txqueue(dev, queue_index); - } else { queue_index = 0; if (dev->real_num_tx_queues > 1) queue_index = skb_tx_hash(dev, skb); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26396ff67cf9..c83b421341c0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2706,7 +2706,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) } else if (skb_gro_len(p) != pinfo->gso_size) return -E2BIG; - headroom = NET_SKB_PAD + NET_IP_ALIGN; + headroom = skb_headroom(p); nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); if (unlikely(!nskb)) return -ENOMEM; diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index f0550941df7b..721a8a37b45c 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } if (!inet->inet_saddr) inet->inet_saddr = rt->rt_src; /* Update source address */ - if (!inet->inet_rcv_saddr) + if (!inet->inet_rcv_saddr) { inet->inet_rcv_saddr = rt->rt_src; + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); + } inet->inet_daddr = rt->rt_dst; inet->inet_dport = 
usin->sin_port; sk->sk_state = TCP_ESTABLISHED; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index a43968918350..7d02a9f999fa 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, struct fib_result res; int no_addr, rpf, accept_local; + bool dev_match; int ret; struct net *net; @@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, } *spec_dst = FIB_RES_PREFSRC(res); fib_combine_itag(itag, &res); + dev_match = false; + #ifdef CONFIG_IP_ROUTE_MULTIPATH - if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1) + for (ret = 0; ret < res.fi->fib_nhs; ret++) { + struct fib_nh *nh = &res.fi->fib_nh[ret]; + + if (nh->nh_dev == dev) { + dev_match = true; + break; + } + } #else if (FIB_RES_DEV(res) == dev) + dev_match = true; #endif - { + if (dev_match) { ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; fib_res_put(&res); return ret; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 79d057a939ba..4a8e370862bc 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node) { struct tnode *ret = node_parent(node); - return rcu_dereference(ret); + return rcu_dereference_check(ret, + rcu_read_lock_held() || + lockdep_rtnl_is_held()); } /* Same as rcu_assign_pointer @@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c) static struct leaf *trie_firstleaf(struct trie *t) { - struct tnode *n = (struct tnode *) rcu_dereference(t->trie); + struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie, + rcu_read_lock_held() || + lockdep_rtnl_is_held()); if (!n) return NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3f56b6e6c6aa..6298f75d5e93 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2738,6 +2738,11 @@ slow_output: } EXPORT_SYMBOL_GPL(__ip_route_output_key); +static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) +{ + return NULL; +} + static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) { } @@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .destroy = ipv4_dst_destroy, - .check = ipv4_dst_check, + .check = ipv4_blackhole_dst_check, .update_pmtu = ipv4_rt_blackhole_update_pmtu, .entries = ATOMIC_INIT(0), }; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 32e0bef60d0a..fb23c2e63b52 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk) } EXPORT_SYMBOL(udp_lib_unhash); +/* + * inet_rcv_saddr was changed, we must rehash secondary hash + */ +void udp_lib_rehash(struct sock *sk, u16 newhash) +{ + if (sk_hashed(sk)) { + struct udp_table *udptable = sk->sk_prot->h.udp_table; + struct udp_hslot *hslot, *hslot2, *nhslot2; + + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + nhslot2 = udp_hashslot2(udptable, newhash); + udp_sk(sk)->udp_portaddr_hash = newhash; + if (hslot2 != nhslot2) { + hslot = udp_hashslot(udptable, sock_net(sk), + udp_sk(sk)->udp_port_hash); + /* we must lock primary chain too */ + spin_lock_bh(&hslot->lock); + + spin_lock(&hslot2->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); + hslot2->count--; + spin_unlock(&hslot2->lock); + + spin_lock(&nhslot2->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, + &nhslot2->head); + nhslot2->count++; + spin_unlock(&nhslot2->lock); + + 
spin_unlock_bh(&hslot->lock); + } + } +} +EXPORT_SYMBOL(udp_lib_rehash); + +static void udp_v4_rehash(struct sock *sk) +{ + u16 new_hash = udp4_portaddr_hash(sock_net(sk), + inet_sk(sk)->inet_rcv_saddr, + inet_sk(sk)->inet_num); + udp_lib_rehash(sk, new_hash); +} + static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; @@ -1843,6 +1886,7 @@ struct proto udp_prot = { .backlog_rcv = __udp_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7d929a22cbc2..ef371aa01ac5 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -105,9 +105,12 @@ ipv4_connected: if (ipv6_addr_any(&np->saddr)) ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); - if (ipv6_addr_any(&np->rcv_saddr)) + if (ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); + } goto out; } @@ -181,6 +184,8 @@ ipv4_connected: if (ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); inet->inet_rcv_saddr = LOOPBACK4_IPV6; + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); } ip6_dst_store(sk, dst, diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 13ef5bc05cf5..578f3c1a16db 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb) kfree_skb(NFCT_FRAG6_CB(skb)->orig); } -/* Memory Tracking Functions. */ -static void frag_kfree_skb(struct sk_buff *skb) -{ - atomic_sub(skb->truesize, &nf_init_frags.mem); - nf_skb_free(skb); - kfree_skb(skb); -} - /* Destruction primitives. */ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) @@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, } found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated. - */ - if (prev) { - int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset; - - if (i > 0) { - offset += i; - if (end <= offset) { - pr_debug("overlap\n"); - goto err; - } - if (!pskb_pull(skb, i)) { - pr_debug("Can't pull\n"); - goto err; - } - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } - - /* Look for overlap with succeeding segments. - * If we can merge fragments, do it. + /* RFC5722, Section 4: + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments, including those not yet received) MUST be silently + * discarded. */ - while (next && NFCT_FRAG6_CB(next)->offset < end) { - /* overlap is 'i' bytes */ - int i = end - NFCT_FRAG6_CB(next)->offset; - - if (i < next->len) { - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - pr_debug("Eat head of the overlapped parts.: %d", i); - if (!pskb_pull(next, i)) - goto err; - /* next fragment */ - NFCT_FRAG6_CB(next)->offset += i; - fq->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragmnet is completely overridden with - * new one drop it. 
- */ - next = next->next; + /* Check for overlap with preceding fragment. */ + if (prev && + (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) + goto discard_fq; - if (prev) - prev->next = next; - else - fq->q.fragments = next; - - fq->q.meat -= free_it->len; - frag_kfree_skb(free_it); - } - } + /* Look for overlap with succeeding segment. */ + if (next && NFCT_FRAG6_CB(next)->offset < end) + goto discard_fq; NFCT_FRAG6_CB(skb)->offset = offset; @@ -371,6 +319,8 @@ found: write_unlock(&nf_frags.lock); return 0; +discard_fq: + fq_kill(fq); err: return -1; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 545c4141b755..64cfef1b0a4c 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) } EXPORT_SYMBOL(ip6_frag_match); -/* Memory Tracking Functions. */ -static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) -{ - atomic_sub(skb->truesize, &nf->mem); - kfree_skb(skb); -} - void ip6_frag_init(struct inet_frag_queue *q, void *a) { struct frag_queue *fq = container_of(q, struct frag_queue, q); @@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, } found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated. + /* RFC5722, Section 4: + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments, including those not yet received) MUST be silently + * discarded. */ - if (prev) { - int i = (FRAG6_CB(prev)->offset + prev->len) - offset; - if (i > 0) { - offset += i; - if (end <= offset) - goto err; - if (!pskb_pull(skb, i)) - goto err; - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } + /* Check for overlap with preceding fragment. */ + if (prev && + (FRAG6_CB(prev)->offset + prev->len) - offset > 0) + goto discard_fq; - /* Look for overlap with succeeding segments. - * If we can merge fragments, do it. - */ - while (next && FRAG6_CB(next)->offset < end) { - int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */ - - if (i < next->len) { - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - if (!pskb_pull(next, i)) - goto err; - FRAG6_CB(next)->offset += i; /* next fragment */ - fq->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragment is completely overridden with - * new one drop it. - */ - next = next->next; - - if (prev) - prev->next = next; - else - fq->q.fragments = next; - - fq->q.meat -= free_it->len; - frag_kfree_skb(fq->q.net, free_it); - } - } + /* Look for overlap with succeeding segment. 
*/ + if (next && FRAG6_CB(next)->offset < end) + goto discard_fq; FRAG6_CB(skb)->offset = offset; @@ -436,6 +393,8 @@ found: write_unlock(&ip6_frags.lock); return -1; +discard_fq: + fq_kill(fq); err: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 1dd1affdead2..5acb3560ff15 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); } +static void udp_v6_rehash(struct sock *sk) +{ + u16 new_hash = udp6_portaddr_hash(sock_net(sk), + &inet6_sk(sk)->rcv_saddr, + inet_sk(sk)->inet_num); + + udp_lib_rehash(sk, new_hash); +} + static inline int compute_score(struct sock *sk, struct net *net, unsigned short hnum, struct in6_addr *saddr, __be16 sport, @@ -1447,6 +1456,7 @@ struct proto udpv6_prot = { .backlog_rcv = udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v6_rehash, .get_port = udp_v6_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a788f9e9427d..6130f9d9dbe1 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ le16_to_cpus(&val_len); n+=2; - if (val_len > 1016) { + if (val_len >= 1016) { IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); return -RSP_INVALID_COMMAND_FORMAT; } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 4f8ddba48011..4c2f89df5cce 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, ip_vs_out_stats(cp, skb); ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); + ip_vs_update_conntrack(skb, cp, 0); ip_vs_conn_put(cp); skb->ipvs_property = 1; diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 33b329bfc2d2..7e9af5b76d9e 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -410,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; - struct nf_conn *ct; #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, @@ -497,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, ip_vs_control_add(n_cp, cp); } - ct = (struct nf_conn *)skb->nfct; - if (ct && ct != &nf_conntrack_untracked) - ip_vs_expect_related(skb, ct, n_cp, - IPPROTO_TCP, &n_cp->dport, 1); - /* * Move tunnel to listen state */ diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 21e1a5e9b9d3..49df6bea6a2d 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, } #endif -static void -ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) +void +ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) { struct nf_conn *ct = (struct nf_conn *)skb->nfct; struct nf_conntrack_tuple new_tuple; @@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) * real-server we will see RIP->DIP. 
*/ new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; - new_tuple.src.u3 = cp->daddr; + if (outin) + new_tuple.src.u3 = cp->daddr; + else + new_tuple.dst.u3 = cp->vaddr; /* * This will also take care of UDP and other protocols. */ - new_tuple.src.u.tcp.port = cp->dport; + if (outin) + new_tuple.src.u.tcp.port = cp->dport; + else + new_tuple.dst.u.tcp.port = cp->vport; nf_conntrack_alter_reply(ct, &new_tuple); } @@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); - ip_vs_update_conntrack(skb, cp); + ip_vs_update_conntrack(skb, cp, 1); /* FIXME: when application helper enlarges the packet and the length is larger than the MTU of outgoing device, there will be still @@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); - ip_vs_update_conntrack(skb, cp); + ip_vs_update_conntrack(skb, cp, 1); /* FIXME: when application helper enlarges the packet and the length is larger than the MTU of outgoing device, there will be still diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 24b2cd555637..d344dc481ccc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1232,6 +1232,18 @@ out: return 0; } +static bool list_has_sctp_addr(const struct list_head *list, + union sctp_addr *ipaddr) +{ + struct sctp_transport *addr; + + list_for_each_entry(addr, list, transports) { + if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr)) + return true; + } + + return false; +} /* A restart is occurring, check to make sure no new addresses * are being added as we may be under a takeover attack. */ @@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, struct sctp_chunk *init, sctp_cmd_seq_t *commands) { - struct sctp_transport *new_addr, *addr; - int found; + struct sctp_transport *new_addr; + int ret = 1; - /* Implementor's Guide - Sectin 5.2.2 + /* Implementor's Guide - Section 5.2.2 * ... * Before responding the endpoint MUST check to see if the * unexpected INIT adds new addresses to the association. If new @@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, /* Search through all current addresses and make sure * we aren't adding any new ones. */ - new_addr = NULL; - found = 0; - list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, - transports) { - found = 0; - list_for_each_entry(addr, &asoc->peer.transport_addr_list, - transports) { - if (sctp_cmp_addr_exact(&new_addr->ipaddr, - &addr->ipaddr)) { - found = 1; - break; - } - } - if (!found) + transports) { + if (!list_has_sctp_addr(&asoc->peer.transport_addr_list, + &new_addr->ipaddr)) { + sctp_sf_send_restart_abort(&new_addr->ipaddr, init, + commands); + ret = 0; break; - } - - /* If a new address was added, ABORT the sender. */ - if (!found && new_addr) { - sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands); + } } /* Return success if all addresses were found. 
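Note: the SCTP hunk replaces a nested found-flag loop with a small membership predicate. The shape of that refactor, with a hypothetical singly linked address list standing in for sctp_transport:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct addr_node {
        const char *addr;
        struct addr_node *next;
};

static bool list_has_addr(const struct addr_node *list, const char *addr)
{
        for (; list; list = list->next)
                if (strcmp(list->addr, addr) == 0)
                        return true;
        return false;
}

/* The restart check then reads as a single loop: every address in the
 * new association must already be known, otherwise abort the sender. */
static bool restart_addrs_ok(const struct addr_node *new_list,
                             const struct addr_node *known)
{
        for (; new_list; new_list = new_list->next)
                if (!list_has_addr(known, new_list->addr))
                        return false;   /* new address: possible takeover */
        return true;
}

int main(void)
{
        struct addr_node b = { "10.0.0.2", NULL };
        struct addr_node a = { "10.0.0.1", &b };
        struct addr_node n = { "10.0.0.3", NULL };

        printf("%d\n", restart_addrs_ok(&a, &a));  /* 1: all known */
        printf("%d\n", restart_addrs_ok(&n, &a));  /* 0: new address */
        return 0;
}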
*/ - return found; + return ret; } /* Populate the verification/tie tags based on overlapping INIT diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4414a18c63b4..0b39b2451ea5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock) static u32 ordernum = 1; struct unix_address *addr; int err; + unsigned int retries = 0; mutex_lock(&u->readlock); @@ -717,9 +718,17 @@ retry: if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, addr->hash)) { spin_unlock(&unix_table_lock); - /* Sanity yield. It is unusual case, but yet... */ - if (!(ordernum&0xFF)) - yield(); + /* + * __unix_find_socket_byname() may take long time if many names + * are already in use. + */ + cond_resched(); + /* Give up if all names seems to be in use. */ + if (retries++ == 0xFFFFF) { + err = -ENOSPC; + kfree(addr); + goto out; + } goto retry; } addr->hash ^= sk->sk_type; diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h index 3c88be946494..02baec732bb5 100644 --- a/security/apparmor/include/resource.h +++ b/security/apparmor/include/resource.h @@ -33,8 +33,8 @@ struct aa_rlimit { }; int aa_map_resource(int resource); -int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, - struct rlimit *new_rlim); +int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *, + unsigned int resource, struct rlimit *new_rlim); void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 6e85cdb4303f..506d2baf6147 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name) *ns_name = NULL; if (name[0] == ':') { char *split = strchr(&name[1], ':'); + *ns_name = skip_spaces(&name[1]); if (split) { /* overwrite ':' with \0 */ *split = 0; @@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name) } else /* a ns name without a following profile is allowed */ name = NULL; - *ns_name = &name[1]; } if (name && *name == 0) name = NULL; diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index f73e2c204218..cf1de4462ccd 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, int error = 0; if (!unconfined(profile)) - error = aa_task_setrlimit(profile, resource, new_rlim); + error = aa_task_setrlimit(profile, task, resource, new_rlim); return error; } diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 19358dc14605..82396050f186 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c @@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, { struct path root, tmp; char *res; - int deleted, connected; - int error = 0; + int connected, error = 0; /* Get the root we want to resolve too, released below */ if (flags & PATH_CHROOT_REL) { @@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, } spin_lock(&dcache_lock); - /* There is a race window between path lookup here and the - * need to strip the " (deleted) string that __d_path applies - * Detect the race and relookup the path - * - * The stripping of (deleted) is a hack that could be removed - * with an updated __d_path - */ - do { - tmp = root; - deleted = d_unlinked(path->dentry); - res = __d_path(path, &tmp, buf, buflen); - - } while (deleted != d_unlinked(path->dentry)); + tmp 
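Note: the af_unix hunk bounds the autobind retry loop and yields the CPU between attempts instead of spinning until a free name turns up. A standalone sketch of the same pattern (name_in_use and the counters are stand-ins; ENOSPC is -28 on Linux):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 0xFFFFF

/* Retry with fresh candidates, yield between attempts, and give up
 * with -ENOSPC once the whole name space has been tried. */
static int autobind_name(bool (*name_in_use)(unsigned int), unsigned int *out)
{
        static unsigned int ordernum = 1;
        unsigned int retries = 0;

        while (name_in_use(ordernum)) {
                ordernum = (ordernum + 1) & 0xFFFFF;
                /* in the kernel: cond_resched() here */
                if (retries++ == MAX_RETRIES)
                        return -ENOSPC; /* all names appear taken */
        }
        *out = ordernum;
        return 0;
}

static bool always_taken(unsigned int n) { (void)n; return true; }
static bool never_taken(unsigned int n)  { (void)n; return false; }

int main(void)
{
        unsigned int name;

        printf("%d\n", autobind_name(never_taken, &name));   /* 0 */
        printf("%d\n", autobind_name(always_taken, &name));  /* -28 */
        return 0;
}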
= root; + res = __d_path(path, &tmp, buf, buflen); spin_unlock(&dcache_lock); *name = res; @@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, *name = buf; goto out; } - if (deleted) { - /* On some filesystems, newly allocated dentries appear to the - * security_path hooks as a deleted dentry except without an - * inode allocated. - * - * Remove the appended deleted text and return as string for - * normal mediation, or auditing. The (deleted) string is - * guaranteed to be added in this case, so just strip it. - */ - buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */ - if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) { + /* Handle two cases: + * 1. A deleted dentry && profile is not allowing mediation of deleted + * 2. On some filesystems, newly allocated dentries appear to the + * security_path hooks as a deleted dentry except without an inode + * allocated. + */ + if (d_unlinked(path->dentry) && path->dentry->d_inode && + !(flags & PATH_MEDIATE_DELETED)) { error = -ENOENT; goto out; - } } /* Determine if the path is connected to the expected root */ diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 3cdc1ad0787e..52cc865f1464 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) /* released below */ ns = aa_get_namespace(root); - write_lock(&ns->lock); if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ + write_lock(&ns->parent->lock); __remove_namespace(ns); + write_unlock(&ns->parent->lock); } else { /* remove profile */ + write_lock(&ns->lock); profile = aa_get_profile(__lookup_profile(&ns->base, name)); if (!profile) { error = -ENOENT; @@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) } name = profile->base.hname; __remove_profile(profile); + write_unlock(&ns->lock); } - write_unlock(&ns->lock); /* don't fail removal if audit fails */ (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index 4a368f1fd36d..a4136c10b1c6 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c @@ -72,6 +72,7 @@ int aa_map_resource(int resource) /** * aa_task_setrlimit - test permission to set an rlimit * @profile - profile confining the task (NOT NULL) + * @task - task the resource is being set on * @resource - the resource being set * @new_rlim - the new resource limit (NOT NULL) * @@ -79,18 +80,21 @@ int aa_map_resource(int resource) * * Returns: 0 or error code if setting resource failed */ -int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, - struct rlimit *new_rlim) +int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task, + unsigned int resource, struct rlimit *new_rlim) { int error = 0; - if (profile->rlimits.mask & (1 << resource) && - new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) - - error = audit_resource(profile, resource, new_rlim->rlim_max, - -EACCES); + /* TODO: extend resource control to handle other (non current) + * processes. 
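Note: the policy.c hunk encodes a lock-ownership rule: a namespace is linked on its parent's list, so unlinking it must take the parent's lock, while its own lock only covers its own children. A pthread sketch of that rule (types and field names hypothetical):

#include <pthread.h>
#include <stdio.h>

struct ns {
        pthread_rwlock_t lock;
        struct ns *parent;
        struct ns *first_child;         /* protected by own lock */
        struct ns *next_sibling;        /* protected by parent->lock */
};

static void remove_namespace(struct ns *n)
{
        struct ns **pp;

        pthread_rwlock_wrlock(&n->parent->lock);        /* parent's lock */
        for (pp = &n->parent->first_child; *pp; pp = &(*pp)->next_sibling)
                if (*pp == n) {
                        *pp = n->next_sibling;
                        break;
                }
        pthread_rwlock_unlock(&n->parent->lock);
}

int main(void)
{
        struct ns root  = { PTHREAD_RWLOCK_INITIALIZER, NULL, NULL, NULL };
        struct ns child = { PTHREAD_RWLOCK_INITIALIZER, &root, NULL, NULL };

        root.first_child = &child;
        remove_namespace(&child);
        printf("%s\n", root.first_child ? "still linked" : "unlinked");
        return 0;
}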
AppArmor rules currently have the implicit assumption + * that the task is setting the resource of the current process + */ + if ((task != current->group_leader) || + (profile->rlimits.mask & (1 << resource) && + new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)) + error = -EACCES; - return error; + return audit_resource(profile, resource, new_rlim->rlim_max, error); } /** diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 16d100d3fc38..3fbcd1dda0ef 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) /* set during initialization */ +extern int iint_initialized; extern int ima_initialized; extern int ima_used_chip; extern char *ima_hash; diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index 7625b85c2274..afba4aef812f 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c @@ -22,9 +22,10 @@ RADIX_TREE(ima_iint_store, GFP_ATOMIC); DEFINE_SPINLOCK(ima_iint_lock); - static struct kmem_cache *iint_cache __read_mostly; +int iint_initialized = 0; + /* ima_iint_find_get - return the iint associated with an inode * * ima_iint_find_get gets a reference to the iint. Caller must @@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void) iint_cache = kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, SLAB_PANIC, init_once); + iint_initialized = 1; return 0; } security_initcall(ima_iintcache_init); diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index f93641382e9f..e662b89d4079 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -148,12 +148,14 @@ void ima_counts_get(struct file *file) struct ima_iint_cache *iint; int rc; - if (!ima_initialized || !S_ISREG(inode->i_mode)) + if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find_get(inode); if (!iint) return; mutex_lock(&iint->mutex); + if (!ima_initialized) + goto out; rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); if (rc < 0) goto out; @@ -213,7 +215,7 @@ void ima_file_free(struct file *file) struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; - if (!ima_initialized || !S_ISREG(inode->i_mode)) + if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find_get(inode); if (!iint) @@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename, { struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; - int rc; + int rc = 0; if (!ima_initialized || !S_ISREG(inode->i_mode)) return 0; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index b2b0998d6abd..60924f6a52db 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void) keyring_r = NULL; me = current; + rcu_read_lock(); write_lock_irq(&tasklist_lock); parent = me->real_parent; @@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void) goto not_permitted; /* the keyrings must have the same UID */ - if (pcred->tgcred->session_keyring->uid != mycred->euid || + if ((pcred->tgcred->session_keyring && + pcred->tgcred->session_keyring->uid != mycred->euid) || mycred->tgcred->session_keyring->uid != mycred->euid) goto not_permitted; @@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void) set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); 
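Note: the IMA hunks split readiness into two flags because the iint cache comes up at security_initcall time, earlier than the rest of IMA. A sketch of the resulting two-stage guard (the "count only" middle stage is a simplification of what the real hook does between the checks):

#include <stdbool.h>
#include <stdio.h>

static bool iint_initialized;
static bool ima_initialized;

static void counts_get(void)
{
        if (!iint_initialized)          /* cache not even allocated yet */
                return;
        /* ... look up the iint for the inode ... */
        if (!ima_initialized)           /* policy/TPM not ready: count only */
                return;
        puts("measuring");
}

int main(void)
{
        counts_get();                   /* nothing: too early */
        iint_initialized = true;
        counts_get();                   /* cache work only */
        ima_initialized = true;
        counts_get();                   /* prints "measuring" */
        return 0;
}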
write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); if (oldcred) put_cred(oldcred); return 0; @@ -1327,6 +1330,7 @@ already_same: ret = 0; not_permitted: write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); put_cred(cred); return ret; diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index eb68326c37d4..a7868ad4d530 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card, if (get_user(device, (int __user *)argp)) return -EFAULT; + if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */ + device = SNDRV_RAWMIDI_DEVICES - 1; mutex_lock(®ister_mutex); device = device < 0 ? 0 : device + 1; while (device < SNDRV_RAWMIDI_DEVICES) { diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c index 685712276ac9..69cd7b3c362d 100644 --- a/sound/core/seq/oss/seq_oss_init.c +++ b/sound/core/seq/oss/seq_oss_init.c @@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level) return 0; _error: - snd_seq_oss_writeq_delete(dp->writeq); - snd_seq_oss_readq_delete(dp->readq); snd_seq_oss_synth_cleanup(dp); snd_seq_oss_midi_cleanup(dp); - delete_port(dp); delete_seq_queue(dp->queue); - kfree(dp); + delete_port(dp); return rc; } @@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp) static int delete_port(struct seq_oss_devinfo *dp) { - if (dp->port < 0) + if (dp->port < 0) { + kfree(dp); return 0; + } debug_printk(("delete_port %i\n", dp->port)); return snd_seq_event_port_detach(dp->cseq, dp->port); diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c index 5f3e68401f90..91d6023a63e5 100644 --- a/sound/isa/msnd/msnd_pinnacle.c +++ b/sound/isa/msnd/msnd_pinnacle.c @@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; +#ifndef MSND_CLASSIC static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; -#ifndef MSND_CLASSIC /* Extra Peripheral Configuration (Default: Disable) */ static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; @@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx) struct snd_card *card; struct snd_msnd *chip; - if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) { + if (has_isapnp(idx) +#ifndef MSND_CLASSIC + || cfg[idx] == SNDRV_AUTO_PORT +#endif + ) { printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); return -ENODEV; } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 3827092cc1d2..14829210ef0b 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, cfg->hp_outs--; memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); - memmove(sequences_hp + i - 1, sequences_hp + i, + memmove(sequences_hp + i, sequences_hp + i + 1, sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); } } diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 4ef5efaaaef1..488fd9ade1ba 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = { {} /* terminator */ }; +/* Errata: CS4207 rev C0/C1/C2 Silicon + * + * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf + * + * 6. 
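Note: the hda_codec hunk is a classic array-removal off-by-one: both memmove destinations must start at index i so sequences_hp stays aligned with hp_pins; the old "+ i - 1" destination clobbered the element before the one being removed. The corrected shift in isolation:

#include <stdio.h>
#include <string.h>

/* Remove element i from a packed array by shifting the tail down. */
static void remove_at(int *arr, int *len, int i)
{
        memmove(arr + i, arr + i + 1, sizeof(arr[0]) * (*len - i - 1));
        (*len)--;
}

int main(void)
{
        int a[] = { 10, 20, 30, 40 };
        int n = 4;

        remove_at(a, &n, 1);            /* drop the 20 */
        for (int i = 0; i < n; i++)
                printf("%d ", a[i]);    /* 10 30 40 */
        printf("\n");
        return 0;
}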
At high temperature (TA > +85°C), the digital supply current (IVD) + * may be excessive (up to an additional 200 μA), which is most easily + * observed while the part is being held in reset (RESET# active low). + * + * Root Cause: At initial powerup of the device, the logic that drives + * the clock and write enable to the S/PDIF SRC RAMs is not properly + * initialized. + * Certain random patterns will cause a steady leakage current in those + * RAM cells. The issue will resolve once the SRCs are used (turned on). + * + * Workaround: The following verb sequence briefly turns on the S/PDIF SRC + * blocks, which will alleviate the issue. + */ + +static struct hda_verb cs_errata_init_verbs[] = { + {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */ + {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */ + + {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, + {0x11, AC_VERB_SET_PROC_COEF, 0x9999}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, + {0x11, AC_VERB_SET_PROC_COEF, 0xa412}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0009}, + + {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */ + {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */ + + {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, + {0x11, AC_VERB_SET_PROC_COEF, 0x2412}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0000}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, + {0x11, AC_VERB_SET_PROC_STATE, 0x00}, + + {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ + {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ + /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ + + {} /* terminator */ +}; + /* SPDIF setup */ static void init_digital(struct hda_codec *codec) { @@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec) { struct cs_spec *spec = codec->spec; + /* init_verb sequence for C0/C1/C2 errata*/ + snd_hda_sequence_write(codec, cs_errata_init_verbs); + snd_hda_sequence_write(codec, cs_coef_init_verbs); if (spec->gpio_mask) { diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 5cdb80edbd7f..71f9d6475b09 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -116,6 +116,7 @@ struct conexant_spec { unsigned int dell_vostro:1; unsigned int ideapad:1; unsigned int thinkpad:1; + unsigned int hp_laptop:1; unsigned int ext_mic_present; unsigned int recording; @@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec) } } +/* toggle input of built-in digital mic and mic jack appropriately */ +static void cxt5066_hp_laptop_automic(struct hda_codec *codec) +{ + unsigned int present; + + present = snd_hda_jack_detect(codec, 0x1b); + snd_printdd("CXT5066: external microphone present=%d\n", present); + snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL, + present ? 
1 : 3); +} + + /* toggle input of built-in digital mic and mic jack appropriately order is: external mic -> dock mic -> interal mic */ static void cxt5066_thinkpad_automic(struct hda_codec *codec) @@ -2408,6 +2421,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res) } /* unsolicited event for jack sensing */ +static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res) +{ + snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26); + switch (res >> 26) { + case CONEXANT_HP_EVENT: + cxt5066_hp_automute(codec); + break; + case CONEXANT_MIC_EVENT: + cxt5066_hp_laptop_automic(codec); + break; + } +} + +/* unsolicited event for jack sensing */ static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) { snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); @@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = { { } /* end */ }; + +static struct hda_verb cxt5066_init_verbs_hp_laptop[] = { + {0x14, AC_VERB_SET_CONNECT_SEL, 0x0}, + {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, + {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT}, + { } /* end */ +}; + /* initialize jack-sensing, too */ static int cxt5066_init(struct hda_codec *codec) { @@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec) cxt5066_ideapad_automic(codec); else if (spec->thinkpad) cxt5066_thinkpad_automic(codec); + else if (spec->hp_laptop) + cxt5066_hp_laptop_automic(codec); } cxt5066_set_mic_boost(codec); return 0; @@ -3031,6 +3068,7 @@ enum { CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ + CXT5066_HP_LAPTOP, /* HP Laptop */ CXT5066_MODELS }; @@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = { [CXT5066_DELL_VOSTO] = "dell-vostro", [CXT5066_IDEAPAD] = "ideapad", [CXT5066_THINKPAD] = "thinkpad", + [CXT5066_HP_LAPTOP] = "hp-laptop", }; static struct snd_pci_quirk cxt5066_cfg_tbl[] = { @@ -3052,8 +3091,10 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), @@ -3116,6 +3157,23 @@ static int patch_cxt5066(struct hda_codec *codec) spec->num_init_verbs++; spec->dell_automute = 1; break; + case CXT5066_HP_LAPTOP: + codec->patch_ops.init = cxt5066_init; + codec->patch_ops.unsol_event = cxt5066_hp_laptop_event; + spec->init_verbs[spec->num_init_verbs] = + cxt5066_init_verbs_hp_laptop; + spec->num_init_verbs++; + spec->hp_laptop = 1; + spec->mixers[spec->num_mixers++] = cxt5066_mixer_master; + spec->mixers[spec->num_mixers++] = cxt5066_mixers; + /* no S/PDIF out */ + spec->multiout.dig_out_nid = 0; + /* input source automatically selected */ + spec->input_mux = NULL; + spec->port_d_mode = 0; + spec->mic_boost = 3; /* default 30dB gain 
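Note: the hp-laptop additions follow the driver's unsolicited-event convention: the tag programmed alongside AC_USRSP_EN comes back in the top six bits of the response, so handlers dispatch on res >> 26. A sketch with illustrative tag values (the real CONEXANT_*_EVENT numbers may differ):

#include <stdio.h>

#define HP_EVENT  0x37u
#define MIC_EVENT 0x38u

static void hp_laptop_event(unsigned int res)
{
        switch (res >> 26) {            /* recover the programmed tag */
        case HP_EVENT:
                puts("headphone jack: update automute");
                break;
        case MIC_EVENT:
                puts("mic jack: update automic");
                break;
        }
}

int main(void)
{
        hp_laptop_event(HP_EVENT << 26);
        hp_laptop_event(MIC_EVENT << 26);
        return 0;
}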
*/ + break; + case CXT5066_OLPC_XO_1_5: codec->patch_ops.init = cxt5066_olpc_init; codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 627bf9963368..bcbf9160ed81 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5334,6 +5334,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids, static struct snd_pci_quirk beep_white_list[] = { SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), + SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1), SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), {} }; diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h index 6147216af744..a3409edcfb50 100644 --- a/sound/pci/oxygen/oxygen.h +++ b/sound/pci/oxygen/oxygen.h @@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci); int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); int oxygen_pci_resume(struct pci_dev *pci); #endif +void oxygen_pci_shutdown(struct pci_dev *pci); /* oxygen_mixer.c */ diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c index fad03d64e3ad..7e93cf884437 100644 --- a/sound/pci/oxygen/oxygen_lib.c +++ b/sound/pci/oxygen/oxygen_lib.c @@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip) } } -static void oxygen_card_free(struct snd_card *card) +static void oxygen_shutdown(struct oxygen *chip) { - struct oxygen *chip = card->private_data; - spin_lock_irq(&chip->reg_lock); chip->interrupt_mask = 0; chip->pcm_running = 0; oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); spin_unlock_irq(&chip->reg_lock); +} + +static void oxygen_card_free(struct snd_card *card) +{ + struct oxygen *chip = card->private_data; + + oxygen_shutdown(chip); if (chip->irq >= 0) free_irq(chip->irq, chip); flush_scheduled_work(); @@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci) } EXPORT_SYMBOL(oxygen_pci_resume); #endif /* CONFIG_PM */ + +void oxygen_pci_shutdown(struct pci_dev *pci) +{ + struct snd_card *card = pci_get_drvdata(pci); + struct oxygen *chip = card->private_data; + + oxygen_shutdown(chip); + chip->model.cleanup(chip); +} +EXPORT_SYMBOL(oxygen_pci_shutdown); diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c index f03a2f2cffee..06c863e86e3d 100644 --- a/sound/pci/oxygen/virtuoso.c +++ b/sound/pci/oxygen/virtuoso.c @@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = { .suspend = oxygen_pci_suspend, .resume = oxygen_pci_resume, #endif + .shutdown = oxygen_pci_shutdown, }; static int __init alsa_card_xonar_init(void) diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c index dbc4b89d74e4..b82c1cfa96f5 100644 --- a/sound/pci/oxygen/xonar_wm87x6.c +++ b/sound/pci/oxygen/xonar_wm87x6.c @@ -53,6 +53,8 @@ struct xonar_wm87x6 { struct xonar_generic generic; u16 wm8776_regs[0x17]; u16 wm8766_regs[0x10]; + struct snd_kcontrol *line_adcmux_control; + struct snd_kcontrol *mic_adcmux_control; struct snd_kcontrol *lc_controls[13]; }; @@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip) static void xonar_ds_cleanup(struct oxygen *chip) { xonar_disable_output(chip); + wm8776_write(chip, WM8776_RESET, 0); } static void xonar_ds_suspend(struct oxygen *chip) @@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, { struct oxygen *chip = ctl->private_data; struct xonar_wm87x6 *data = chip->model_data; + struct snd_kcontrol *other_ctl; unsigned int mux_bit = ctl->private_value; u16 reg; int changed; @@ -610,8 
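Note: the oxygen hunks factor the interrupt-masking sequence into a helper shared by the card-free path and the new PCI .shutdown callback, which additionally runs the model cleanup (for the Xonar DS, resetting the WM8776 so outputs stay muted across reboot). The shape of the refactor, with stand-in names:

#include <stdio.h>

struct chip {
        void (*model_cleanup)(struct chip *);
};

static void shutdown_hw(struct chip *c)
{
        (void)c;
        puts("mask interrupts, stop DMA");      /* shared sequence */
}

static void card_free(struct chip *c)           /* module unload path */
{
        shutdown_hw(c);
        puts("free irq, free card");
}

static void pci_shutdown(struct chip *c)        /* reboot path */
{
        shutdown_hw(c);
        c->model_cleanup(c);    /* e.g. reset the codec to mute outputs */
}

static void xonar_cleanup(struct chip *c) { (void)c; puts("reset WM8776"); }

int main(void)
{
        struct chip c = { xonar_cleanup };

        pci_shutdown(&c);
        card_free(&c);
        return 0;
}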
+614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl, mutex_lock(&chip->mutex); reg = data->wm8776_regs[WM8776_ADCMUX]; if (value->value.integer.value[0]) { - reg &= ~0x003; reg |= mux_bit; + /* line-in and mic-in are exclusive */ + mux_bit ^= 3; + if (reg & mux_bit) { + reg &= ~mux_bit; + if (mux_bit == 1) + other_ctl = data->line_adcmux_control; + else + other_ctl = data->mic_adcmux_control; + snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, + &other_ctl->id); + } } else reg &= ~mux_bit; changed = reg != data->wm8776_regs[WM8776_ADCMUX]; @@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip) err = snd_ctl_add(chip->card, ctl); if (err < 0) return err; + if (!strcmp(ctl->id.name, "Line Capture Switch")) + data->line_adcmux_control = ctl; + else if (!strcmp(ctl->id.name, "Mic Capture Switch")) + data->mic_adcmux_control = ctl; } + if (!data->line_adcmux_control || !data->mic_adcmux_control) + return -ENXIO; BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { ctl = snd_ctl_new1(&lc_controls[i], chip); diff --git a/sound/usb/card.c b/sound/usb/card.c index 9feb00c831a0..4eabafa5b037 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head) for (idx = 0; idx < 2; idx++) { subs = &as->substream[idx]; if (!subs->num_formats) - return; + continue; snd_usb_release_substream_urbs(subs, 1); subs->interface = -1; } @@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) } switch (protocol) { + default: + snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n", + protocol); + /* fall through */ + case UAC_VERSION_1: { struct uac1_ac_header_descriptor *h1 = control_header; @@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) break; } - - default: - snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol); - return -EINVAL; } return 0; @@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev, goto __error; } - chip->ctrl_intf = alts; + /* + * For devices with more than one control interface, we assume the + * first contains the audio controls. We might need a more specific + * check here in the future. 
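Note: the snd_usb_stream_disconnect fix above swaps return for continue; in a loop over an array, bailing out at the first unused slot leaves every later slot untouched. Reduced to its essentials:

#include <stdio.h>

struct substream { int in_use; };

static void stream_disconnect(struct substream *subs, int n)
{
        for (int i = 0; i < n; i++) {
                if (!subs[i].in_use)
                        continue;       /* was: return, leaking the rest */
                printf("releasing substream %d\n", i);
                subs[i].in_use = 0;
        }
}

int main(void)
{
        struct substream s[2] = { { 0 }, { 1 } };

        stream_disconnect(s, 2);        /* still releases substream 1 */
        return 0;
}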
+ */ + if (!chip->ctrl_intf) + chip->ctrl_intf = alts; if (err > 0) { /* create normal USB audio interfaces */ diff --git a/sound/usb/clock.c b/sound/usb/clock.c index b853f8df794f..7754a1034545 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface, switch (altsd->bInterfaceProtocol) { case UAC_VERSION_1: + default: return set_sample_rate_v1(chip, iface, alts, fmt, rate); case UAC_VERSION_2: return set_sample_rate_v2(chip, iface, alts, fmt, rate); } - - return -EINVAL; } diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 1a701f1e8f50..ef0a07e34844 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) /* get audio formats */ switch (protocol) { + default: + snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n", + dev->devnum, iface_no, altno, protocol); + protocol = UAC_VERSION_1; + /* fall through */ + case UAC_VERSION_1: { struct uac1_as_header_descriptor *as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); @@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) dev->devnum, iface_no, altno, as->bTerminalLink); continue; } - - default: - snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n", - dev->devnum, iface_no, altno, protocol); - continue; } /* get format type */ diff --git a/sound/usb/format.c b/sound/usb/format.c index 3a1375459c06..69148212aa70 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, u64 pcm_formats; switch (protocol) { - case UAC_VERSION_1: { + case UAC_VERSION_1: + default: { struct uac_format_type_i_discrete_descriptor *fmt = _fmt; sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubframeSize; @@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, format <<= 1; break; } - - default: - return -EINVAL; } pcm_formats = 0; @@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, * audio class v2 uses class specific EP0 range requests for that. 
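Note: several hunks in this series apply the same policy: an unrecognized UAC interface protocol is logged and handled by the v1 parser instead of failing with -EINVAL, since v1 is the safest default for devices reporting a bogus bInterfaceProtocol. The switch shape, using the real UAC_VERSION_1/2 protocol values (0x00 and 0x20):

#include <stdio.h>

enum { UAC_V1 = 0x00, UAC_V2 = 0x20 };

static int parse(int protocol)
{
        switch (protocol) {
        default:
                printf("unknown protocol %#02x, assuming v1\n", protocol);
                /* fall through */
        case UAC_V1:
                return 1;       /* v1 parser */
        case UAC_V2:
                return 2;       /* v2 parser */
        }
}

int main(void)
{
        printf("%d\n", parse(UAC_V1));  /* 1 */
        printf("%d\n", parse(UAC_V2));  /* 2 */
        printf("%d\n", parse(0x99));    /* logs, then 1 */
        return 0;
}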
*/ switch (protocol) { + default: + snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", + chip->dev->devnum, fp->iface, fp->altsetting, protocol); + /* fall through */ case UAC_VERSION_1: fp->channels = fmt->bNrChannels; ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); @@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip, /* fp->channels is already set in this case */ ret = parse_audio_format_rates_v2(chip, fp); break; - default: - snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", - chip->dev->devnum, fp->iface, fp->altsetting, protocol); - return -EINVAL; } if (fp->channels < 1) { @@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, fp->channels = 1; switch (protocol) { + default: + snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n", + chip->dev->devnum, fp->iface, fp->altsetting, protocol); + /* fall through */ case UAC_VERSION_1: { struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; brate = le16_to_cpu(fmt->wMaxBitRate); @@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip, ret = parse_audio_format_rates_v2(chip, fp); break; } - default: - snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n", - chip->dev->devnum, fp->iface, fp->altsetting, protocol); - return -EINVAL; } return ret; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c166db0057d3..3ed3901369ce 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, } host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; - mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol; + switch (get_iface_desc(host_iface)->bInterfaceProtocol) { + case UAC_VERSION_1: + default: + mixer->protocol = UAC_VERSION_1; + break; + case UAC_VERSION_2: + mixer->protocol = UAC_VERSION_2; + break; + } if ((err = snd_usb_mixer_controls(mixer)) < 0 || (err = snd_usb_mixer_status_create(mixer)) < 0) diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 3634cedf9306..3b5135c93062 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, switch (altsd->bInterfaceProtocol) { case UAC_VERSION_1: + default: return init_pitch_v1(chip, iface, alts, fmt); case UAC_VERSION_2: return init_pitch_v2(chip, iface, alts, fmt); } - - return -EINVAL; } /* diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 624a96c636fd..6de4313924fb 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node) INIT_LIST_HEAD(&node->children); INIT_LIST_HEAD(&node->val); + node->children_hit = 0; node->parent = NULL; node->hit = 0; } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e72f05c3bef0..fcc16e4349df 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, goto error; } tev->point.offset = pev->point.offset; + tev->point.retprobe = pev->point.retprobe; tev->nargs = pev->nargs; if (tev->nargs) { tev->args = zalloc(sizeof(struct probe_trace_arg) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 525136684d4e..32b81f707ff5 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -686,6 +686,25 
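Note: the callchain.h one-liner fixes a hand-rolled init function that zeroed hit but not children_hit, so whatever happened to be in the freshly allocated node leaked into the accumulated counts. The bug class in miniature:

#include <stdio.h>

struct node {
        unsigned long hit;
        unsigned long children_hit;
};

/* A hand-written init must zero every accumulator field; designated
 * initializers or memset avoid this class of bug entirely. */
static void node_init(struct node *n)
{
        n->hit = 0;
        n->children_hit = 0;    /* the field the fix adds */
}

int main(void)
{
        struct node n = { 0xdead, 0xbeef };     /* stand-in for stale memory */

        node_init(&n);
        printf("%lu %lu\n", n.hit, n.children_hit);     /* 0 0 */
        return 0;
}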
@@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) char buf[32], *ptr; int ret, nscopes; + if (!is_c_varname(pf->pvar->var)) { + /* Copy raw parameters */ + pf->tvar->value = strdup(pf->pvar->var); + if (pf->tvar->value == NULL) + return -ENOMEM; + if (pf->pvar->type) { + pf->tvar->type = strdup(pf->pvar->type); + if (pf->tvar->type == NULL) + return -ENOMEM; + } + if (pf->pvar->name) { + pf->tvar->name = strdup(pf->pvar->name); + if (pf->tvar->name == NULL) + return -ENOMEM; + } else + pf->tvar->name = NULL; + return 0; + } + if (pf->pvar->name) pf->tvar->name = strdup(pf->pvar->name); else { @@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) if (pf->tvar->name == NULL) return -ENOMEM; - if (!is_c_varname(pf->pvar->var)) { - /* Copy raw parameters */ - pf->tvar->value = strdup(pf->pvar->var); - if (pf->tvar->value == NULL) - return -ENOMEM; - if (pf->pvar->type) { - pf->tvar->type = strdup(pf->pvar->type); - if (pf->tvar->type == NULL) - return -ENOMEM; - } - return 0; - } - pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); /* Search child die for local variables and parameters. */ @@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) /* This function has no name. */ tev->point.offset = (unsigned long)pf->addr; + /* Return probe must be on the head of a subprogram */ + if (pf->pev->point.retprobe) { + if (tev->point.offset != 0) { + pr_warning("Return probe must be on the head of" + " a real function\n"); + return -EINVAL; + } + tev->point.retprobe = true; + } + pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, tev->point.offset); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1a367734e016..b2f5ae97f33d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str, int symbol__init(void) { + if (symbol_conf.initialized) + return 0; + elf_version(EV_CURRENT); if (symbol_conf.sort_by_name) symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - @@ -2293,6 +2296,7 @@ int symbol__init(void) symbol_conf.sym_list_str, "symbol") < 0) goto out_free_comm_list; + symbol_conf.initialized = true; return 0; out_free_dso_list: @@ -2304,11 +2308,14 @@ out_free_comm_list: void symbol__exit(void) { + if (!symbol_conf.initialized) + return; strlist__delete(symbol_conf.sym_list); strlist__delete(symbol_conf.dso_list); strlist__delete(symbol_conf.comm_list); vmlinux_path__exit(); symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; + symbol_conf.initialized = false; } int machines__create_kernel_maps(struct rb_root *self, pid_t pid) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index b7a8da4af5a0..ea95c2756f05 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -69,7 +69,8 @@ struct symbol_conf { show_nr_samples, use_callchain, exclude_other, - show_cpu_utilization; + show_cpu_utilization, + initialized; const char *vmlinux_name, *source_prefix, *field_sep; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b78b794c1039..d4853a54771a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, cpu); hardware_disable(NULL); break; - case CPU_ONLINE: + case CPU_STARTING: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); - smp_call_function_single(cpu, hardware_enable, NULL, 
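Note: the symbol__init/symbol__exit hunks add an initialized flag so init is idempotent and exit is safe to call before init ever ran. The guard pattern on its own:

#include <stdbool.h>
#include <stdio.h>

static bool initialized;

static int subsys_init(void)
{
        if (initialized)
                return 0;       /* second call is a no-op */
        puts("allocating lists");
        initialized = true;
        return 0;
}

static void subsys_exit(void)
{
        if (!initialized)
                return;         /* exit before init: nothing to free */
        puts("freeing lists");
        initialized = false;
}

int main(void)
{
        subsys_exit();  /* safe no-op */
        subsys_init();  /* allocates */
        subsys_init();  /* no-op */
        subsys_exit();  /* frees */
        return 0;
}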
1); + hardware_enable(NULL); break; } return NOTIFY_OK; @@ -2096,7 +2096,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, static struct notifier_block kvm_cpu_notifier = { .notifier_call = kvm_cpu_hotplug, - .priority = 20, /* must be > scheduler priority */ }; static int vm_stat_get(void *_offset, u64 *val) |
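Note: the kvm_main.c change relies on a property of CPU_STARTING: unlike CPU_ONLINE, it is delivered on the CPU that is coming up, so hardware_enable() can be called directly instead of through smp_call_function_single(), and the "must be > scheduler priority" ordering hack becomes unnecessary. A sketch of the dispatch (the event constants here are local stand-ins, not the kernel's values):

#include <stdio.h>

enum { CPU_STARTING, CPU_DYING };

static void hardware_enable(void)  { puts("enable virtualization here"); }
static void hardware_disable(void) { puts("disable virtualization here"); }

static int cpu_hotplug(int event)
{
        switch (event) {
        case CPU_STARTING:
                hardware_enable();      /* already on the target CPU */
                break;
        case CPU_DYING:
                hardware_disable();
                break;
        }
        return 0;
}

int main(void)
{
        cpu_hotplug(CPU_STARTING);
        cpu_hotplug(CPU_DYING);
        return 0;
}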