1176 files changed, 20225 insertions, 12232 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb new file mode 100644 index 000000000000..2107082426da --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb @@ -0,0 +1,44 @@ +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_alpha +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the alpha blending value for the overlay. Values range + from 0 (transparent) to 255 (opaque). The value is ignored if + the mode is not set to Alpha Blending. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_mode +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Selects the composition mode for the overlay. Possible values + are + + 0 - Alpha Blending + 1 - ROP3 + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_position +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the x,y overlay position on the display in pixels. The + position format is `[0-9]+,[0-9]+'. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_rop3 +Date: May 2012 +Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the raster operation (ROP3) for the overlay. Values + range from 0 to 255. The value is ignored if the mode is not + set to ROP3. diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop index 814b01354c41..b31e782bd985 100644 --- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop +++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop @@ -5,4 +5,15 @@ Contact: "Ike Panhc <ike.pan@canonical.com>" Description: Control the power of camera module. 1 means on, 0 means off. +What: /sys/devices/platform/ideapad/fan_mode +Date: June 2012 +KernelVersion: 3.6 +Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>" +Description: + Change fan mode + There are four available modes: + * 0 -> Super Silent Mode + * 1 -> Standard Mode + * 2 -> Dust Cleaning + * 4 -> Efficient Thermal Dissipation Mode diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl index 3fca32c41927..25b58efd955d 100644 --- a/Documentation/DocBook/filesystems.tmpl +++ b/Documentation/DocBook/filesystems.tmpl @@ -224,8 +224,8 @@ all your transactions. </para> <para> -Then at umount time , in your put_super() (2.4) or write_super() (2.5) -you can then call journal_destroy() to clean up your in-core journal object. +Then at umount time , in your put_super() you can then call journal_destroy() +to clean up your in-core journal object. 
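As an illustration of the put_super() pattern described just above (this sketch is not part of the patch; the myfs_* names, the private sb_info structure and its s_journal field are assumptions, with the journal itself presumed to have been set up at mount time):

    #include <linux/fs.h>
    #include <linux/jbd.h>
    #include <linux/slab.h>

    struct myfs_sb_info {
            journal_t *s_journal;           /* created at mount time */
    };

    /* Called at umount time; tear down the in-core journal object. */
    static void myfs_put_super(struct super_block *sb)
    {
            struct myfs_sb_info *sbi = sb->s_fs_info;

            journal_destroy(sbi->s_journal);
            kfree(sbi);
            sb->s_fs_info = NULL;
    }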
</para> <para> diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml index 720395127904..701138f1209d 100644 --- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml +++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml @@ -125,7 +125,7 @@ the structure refers to a radio tuner the <constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para> <para>If multiple frequency bands are supported, then <structfield>capability</structfield> is the union of all -<structfield>capability></structfield> fields of each &v4l2-frequency-band;. +<structfield>capability</structfield> fields of each &v4l2-frequency-band;. </para></entry> </row> <row> diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index d8147b336c35..6518a55273e7 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -38,6 +38,13 @@ read or write requests. Note that the total allocated number may be twice this amount, since it applies only to reads or writes (not the accumulated sum). +To avoid priority inversion through request starvation, a request +queue maintains a separate request pool per each cgroup when +CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such +per-block-cgroup request pool. IOW, if there are N block cgroups, +each request queue may have upto N request pools, each independently +regulated by nr_requests. + read_ahead_kb (RW) ------------------ Maximum number of kilobytes to read-ahead for filesystems on this block diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 946c73342cde..1c1844957166 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -27,6 +27,10 @@ The target is named "raid" and it accepts the following parameters: - rotating parity N (right-to-left) with data restart raid6_nc RAID6 N continue - rotating parity N (right-to-left) with data continuation + raid10 Various RAID10 inspired algorithms chosen by additional params + - RAID10: Striped Mirrors (aka 'Striping on top of mirrors') + - RAID1E: Integrated Adjacent Stripe Mirroring + - and other similar RAID10 variants Reference: Chapter 4 of http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf @@ -59,6 +63,28 @@ The target is named "raid" and it accepts the following parameters: logical size of the array. The bitmap records the device synchronisation state for each region. + [raid10_copies <# copies>] + [raid10_format near] + These two options are used to alter the default layout of + a RAID10 configuration. The number of copies is can be + specified, but the default is 2. There are other variations + to how the copies are laid down - the default and only current + option is "near". Near copies are what most people think of + with respect to mirroring. If these options are left + unspecified, or 'raid10_copies 2' and/or 'raid10_format near' + are given, then the layouts for 2, 3 and 4 devices are: + 2 drives 3 drives 4 drives + -------- ---------- -------------- + A1 A1 A1 A1 A2 A1 A1 A2 A2 + A2 A2 A2 A3 A3 A3 A3 A4 A4 + A3 A3 A4 A4 A5 A5 A5 A6 A6 + A4 A4 A5 A6 A6 A7 A7 A8 A8 + .. .. .. .. .. .. .. .. .. + The 2-device layout is equivalent 2-way RAID1. The 4-device + layout is what a traditional RAID10 would look like. The + 3-device layout is what might be called a 'RAID1E - Integrated + Adjacent Stripe Mirroring'. + <#raid_devs>: The number of devices composing the array. 
Each device consists of two entries. The first is the device containing the metadata (if any); the second is the one containing the diff --git a/Documentation/devicetree/bindings/arm/mrvl/intc.txt b/Documentation/devicetree/bindings/arm/mrvl/intc.txt index 80b9a94d9a23..8b53273cb22f 100644 --- a/Documentation/devicetree/bindings/arm/mrvl/intc.txt +++ b/Documentation/devicetree/bindings/arm/mrvl/intc.txt @@ -38,3 +38,23 @@ Example: reg-names = "mux status", "mux mask"; mrvl,intc-nr-irqs = <2>; }; + +* Marvell Orion Interrupt controller + +Required properties +- compatible : Should be "marvell,orion-intc". +- #interrupt-cells: Specifies the number of cells needed to encode an + interrupt source. Supported value is <1>. +- interrupt-controller : Declare this node to be an interrupt controller. +- reg : Interrupt mask address. A list of 4 byte ranges, one per controller. + One entry in the list represents 32 interrupts. + +Example: + + intc: interrupt-controller { + compatible = "marvell,orion-intc", "marvell,intc"; + interrupt-controller; + #interrupt-cells = <1>; + reg = <0xfed20204 0x04>, + <0xfed20214 0x04>; + }; diff --git a/Documentation/devicetree/bindings/ata/marvell.txt b/Documentation/devicetree/bindings/ata/marvell.txt new file mode 100644 index 000000000000..b5cdd20cde9c --- /dev/null +++ b/Documentation/devicetree/bindings/ata/marvell.txt @@ -0,0 +1,16 @@ +* Marvell Orion SATA + +Required Properties: +- compatibility : "marvell,orion-sata" +- reg : Address range of controller +- interrupts : Interrupt controller is using +- nr-ports : Number of SATA ports in use. + +Example: + + sata@80000 { + compatible = "marvell,orion-sata"; + reg = <0x80000 0x5000>; + interrupts = <21>; + nr-ports = <2>; + } diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt index 05428f39d9ac..e13787498bcf 100644 --- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt +++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt @@ -27,3 +27,26 @@ Example: interrupt-controller; #interrupt-cells = <1>; }; + +* Marvell Orion GPIO Controller + +Required properties: +- compatible : Should be "marvell,orion-gpio" +- reg : Address and length of the register set for controller. +- gpio-controller : So we know this is a gpio controller. +- ngpio : How many gpios this controller has. +- interrupts : Up to 4 Interrupts for the controller. + +Optional properties: +- mask-offset : For SMP Orions, offset for Nth CPU + +Example: + + gpio0: gpio@10100 { + compatible = "marvell,orion-gpio"; + #gpio-cells = <2>; + gpio-controller; + reg = <0x10100 0x40>; + ngpio = <32>; + interrupts = <35>, <36>, <37>, <38>; + }; diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt index d156e1b5db12..da80c2ae0915 100644 --- a/Documentation/devicetree/bindings/regulator/tps6586x.txt +++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt @@ -9,9 +9,9 @@ Required properties: - regulators: list of regulators provided by this controller, must have property "regulator-compatible" to match their hardware counterparts: sm[0-2], ldo[0-9] and ldo_rtc -- sm0-supply: The input supply for the SM0. -- sm1-supply: The input supply for the SM1. -- sm2-supply: The input supply for the SM2. +- vin-sm0-supply: The input supply for the SM0. +- vin-sm1-supply: The input supply for the SM1. +- vin-sm2-supply: The input supply for the SM2. 
- vinldo01-supply: The input supply for the LDO1 and LDO2 - vinldo23-supply: The input supply for the LDO2 and LDO3 - vinldo4-supply: The input supply for the LDO4 @@ -30,9 +30,9 @@ Example: #gpio-cells = <2>; gpio-controller; - sm0-supply = <&some_reg>; - sm1-supply = <&some_reg>; - sm2-supply = <&some_reg>; + vin-sm0-supply = <&some_reg>; + vin-sm1-supply = <&some_reg>; + vin-sm2-supply = <&some_reg>; vinldo01-supply = <...>; vinldo23-supply = <...>; vinldo4-supply = <...>; diff --git a/Documentation/devicetree/bindings/watchdog/marvel.txt b/Documentation/devicetree/bindings/watchdog/marvel.txt new file mode 100644 index 000000000000..0b2503ab0a05 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/marvel.txt @@ -0,0 +1,14 @@ +* Marvell Orion Watchdog Time + +Required Properties: + +- Compatibility : "marvell,orion-wdt" +- reg : Address of the timer registers + +Example: + + wdt@20300 { + compatible = "marvell,orion-wdt"; + reg = <0x20300 0x28>; + status = "okay"; + }; diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 72ed15075f79..afaff312bf41 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -626,3 +626,14 @@ Why: New drivers should use new V4L2_CAP_VIDEO_M2M capability flag Who: Sylwester Nawrocki <s.nawrocki@samsung.com> ---------------------------- + +What: OMAP private DMA implementation +When: 2013 +Why: We have a DMA engine implementation; all users should be updated + to use this rather than persisting with the old APIs. The old APIs + block merging the old DMA engine implementation into the DMA + engine driver. +Who: Russell King <linux@arm.linux.org.uk>, + Santosh Shilimkar <santosh.shilimkar@ti.com> + +---------------------------- diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 2db1900d7538..e540a24e5d06 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -114,7 +114,6 @@ prototypes: int (*drop_inode) (struct inode *); void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -136,10 +135,9 @@ write_inode: drop_inode: !!!inode->i_lock!!! evict_inode: put_super: write -write_super: read sync_fs: read -freeze_fs: read -unfreeze_fs: read +freeze_fs: write +unfreeze_fs: write statfs: maybe(read) (see below) remount_fs: write umount_begin: no @@ -359,7 +357,6 @@ prototypes: int (*lm_compare_owner)(struct file_lock *, struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_release_private)(struct file_lock *); void (*lm_break)(struct file_lock *); /* break_lease callback */ int (*lm_change)(struct file_lock **, int); @@ -368,7 +365,6 @@ locking rules: lm_compare_owner: yes no lm_notify: yes no lm_grant: no no -lm_release_private: maybe no lm_break: yes no lm_change yes no diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 2bef2b3843d1..0742feebc6e2 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -94,9 +94,8 @@ protected. --- [mandatory] -BKL is also moved from around sb operations. ->write_super() Is now called -without BKL held. BKL should have been shifted into individual fs sb_op -functions. 
If you don't need it, remove it. +BKL is also moved from around sb operations. BKL should have been shifted into +individual fs sb_op functions. If you don't need it, remove it. --- [informational] diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt index ead764b2728f..de1e6c4dccff 100644 --- a/Documentation/filesystems/vfat.txt +++ b/Documentation/filesystems/vfat.txt @@ -137,6 +137,17 @@ errors=panic|continue|remount-ro without doing anything or remount the partition in read-only mode (default behavior). +discard -- If set, issues discard/TRIM commands to the block + device when blocks are freed. This is useful for SSD devices + and sparse/thinly-provisoned LUNs. + +nfs -- This option maintains an index (cache) of directory + inodes by i_logstart which is used by the nfs-related code to + improve look-ups. + + Enable this only if you want to export the FAT filesystem + over NFS + <bool>: 0,1,yes,no,true,false TODO diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 065aa2dc0835..2ee133e030c3 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -216,7 +216,6 @@ struct super_operations { void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -273,9 +272,6 @@ or bottom half). put_super: called when the VFS wishes to free the superblock (i.e. unmount). This is called with the superblock lock held - write_super: called when the VFS superblock needs to be written to - disc. This method is optional - sync_fs: called when VFS is writing out all dirty data associated with a superblock. The second parameter indicates whether the method should wait until the write out has been completed. Optional. diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt index 0bf25eebce94..4ebbfc3f1c6e 100644 --- a/Documentation/laptops/laptop-mode.txt +++ b/Documentation/laptops/laptop-mode.txt @@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10 # # Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been -# exceeded, the kernel will wake pdflush which will then reduce the amount -# of dirty memory to dirty_background_ratio. Set this nice and low, so once -# some writeout has commenced, we do a lot of it. +# exceeded, the kernel will wake flusher threads which will then reduce the +# amount of dirty memory to dirty_background_ratio. Set this nice and low, +# so once some writeout has commenced, we do a lot of it. # #DIRTY_BACKGROUND_RATIO=5 @@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'} # # Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been -# exceeded, the kernel will wake pdflush which will then reduce the amount -# of dirty memory to dirty_background_ratio. Set this nice and low, so once -# some writeout has commenced, we do a lot of it. +# exceeded, the kernel will wake flusher threads which will then reduce the +# amount of dirty memory to dirty_background_ratio. Set this nice and low, +# so once some writeout has commenced, we do a lot of it. 
# DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'} diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt index 8d022073e3ef..2e9e0ae2cd45 100644 --- a/Documentation/networking/netconsole.txt +++ b/Documentation/networking/netconsole.txt @@ -51,8 +51,23 @@ Built-in netconsole starts immediately after the TCP stack is initialized and attempts to bring up the supplied dev at the supplied address. -The remote host can run either 'netcat -u -l -p <port>', -'nc -l -u <port>' or syslogd. +The remote host has several options to receive the kernel messages, +for example: + +1) syslogd + +2) netcat + + On distributions using a BSD-based netcat version (e.g. Fedora, + openSUSE and Ubuntu) the listening port must be specified without + the -p switch: + + 'nc -u -l -p <port>' / 'nc -u -l <port>' or + 'netcat -u -l -p <port>' / 'netcat -u -l <port>' + +3) socat + + 'socat udp-recv:<port> -' Dynamic reconfiguration: ======================== diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt index e40f4b4e1977..1479aca23744 100644 --- a/Documentation/pinctrl.txt +++ b/Documentation/pinctrl.txt @@ -840,9 +840,9 @@ static unsigned long i2c_pin_configs[] = { static struct pinctrl_map __initdata mapping[] = { PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"), - PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), - PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), - PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), + PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), + PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), + PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), }; Finally, some devices expect the mapping table to contain certain specific diff --git a/Documentation/security/Yama.txt b/Documentation/security/Yama.txt index e369de2d48cd..dd908cf64ecf 100644 --- a/Documentation/security/Yama.txt +++ b/Documentation/security/Yama.txt @@ -46,14 +46,13 @@ restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...) so that any otherwise allowed process (even those in external pid namespaces) may attach. -These restrictions do not change how ptrace via PTRACE_TRACEME operates. - -The sysctl settings are: +The sysctl settings (writable only with CAP_SYS_PTRACE) are: 0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other process running under the same uid, as long as it is dumpable (i.e. did not transition uids, start privileged, or have called - prctl(PR_SET_DUMPABLE...) already). + prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is + unchanged. 1 - restricted ptrace: a process must have a predefined relationship with the inferior it wants to call PTRACE_ATTACH on. By default, @@ -61,12 +60,13 @@ The sysctl settings are: classic criteria is also met. To change the relationship, an inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare an allowed debugger PID to call PTRACE_ATTACH on the inferior. + Using PTRACE_TRACEME is unchanged. 2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace - with PTRACE_ATTACH. + with PTRACE_ATTACH, or through children calling PTRACE_TRACEME. -3 - no attach: no processes may use ptrace with PTRACE_ATTACH. 
Once set, - this sysctl cannot be changed to a lower value. +3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via + PTRACE_TRACEME. Once set, this sysctl value cannot be changed. The original children-only logic was based on the restrictions in grsecurity. diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 7456360e161c..a92bba816843 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -53,6 +53,7 @@ ALC882/883/885/888/889 acer-aspire-8930g Acer Aspire 8330G/6935G acer-aspire Acer Aspire others inv-dmic Inverted internal mic workaround + no-primary-hp VAIO Z workaround (for fixed speaker DAC) ALC861/660 ========== @@ -273,6 +274,10 @@ STAC92HD83* dell-s14 Dell laptop dell-vostro-3500 Dell Vostro 3500 laptop hp-dv7-4000 HP dv-7 4000 + hp_cNB11_intquad HP CNB models with 4 speakers + hp-zephyr HP Zephyr + hp-led HP with broken BIOS for mute LED + hp-inv-led HP with broken BIOS for inverted mute LED auto BIOS setup (default) STAC9872 diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 8c235b6e4246..88152f214f48 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt @@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs: - nr_open - overflowuid - overflowgid +- protected_hardlinks +- protected_symlinks - suid_dumpable - super-max - super-nr @@ -157,6 +159,46 @@ The default is 65534. ============================================================== +protected_hardlinks: + +A long-standing class of security issues is the hardlink-based +time-of-check-time-of-use race, most commonly seen in world-writable +directories like /tmp. The common method of exploitation of this flaw +is to cross privilege boundaries when following a given hardlink (i.e. a +root process follows a hardlink created by another user). Additionally, +on systems without separated partitions, this stops unauthorized users +from "pinning" vulnerable setuid/setgid files against being upgraded by +the administrator, or linking to special files. + +When set to "0", hardlink creation behavior is unrestricted. + +When set to "1" hardlinks cannot be created by users if they do not +already own the source file, or do not have read/write access to it. + +This protection is based on the restrictions in Openwall and grsecurity. + +============================================================== + +protected_symlinks: + +A long-standing class of security issues is the symlink-based +time-of-check-time-of-use race, most commonly seen in world-writable +directories like /tmp. The common method of exploitation of this flaw +is to cross privilege boundaries when following a given symlink (i.e. a +root process follows a symlink belonging to another user). For a likely +incomplete list of hundreds of examples across the years, please see: +http://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=/tmp + +When set to "0", symlink following behavior is unrestricted. + +When set to "1" symlinks are permitted to be followed only when outside +a sticky world-writable directory, or when the uid of the symlink and +follower match, or when the directory owner matches the symlink's owner. + +This protection is based on the restrictions in Openwall and grsecurity. 
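To make the Yama behaviour documented a few hunks above concrete, here is a small user-space sketch (not part of the patch) of how an inferior whitelists a single debugger PID when ptrace_scope is 1. prctl() and PR_SET_PTRACER come from the text above; the allow_debugger() helper and the fallback #define (only needed if <linux/prctl.h> predates Yama) are illustrative.

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_PTRACER
    #define PR_SET_PTRACER 0x59616d61      /* "Yama" */
    #endif

    /* Let 'debugger' PTRACE_ATTACH to this process despite restricted ptrace. */
    static int allow_debugger(pid_t debugger)
    {
            if (prctl(PR_SET_PTRACER, (unsigned long)debugger, 0, 0, 0) != 0) {
                    perror("prctl(PR_SET_PTRACER)");
                    return -1;
            }
            return 0;
    }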
+ +============================================================== + suid_dumpable: This value can be used to query and set the core dump mode for setuid diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index dcc2a94ae34e..078701fdbd4d 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -76,8 +76,8 @@ huge pages although processes will also directly compact memory as required. dirty_background_bytes -Contains the amount of dirty memory at which the pdflush background writeback -daemon will start writeback. +Contains the amount of dirty memory at which the background kernel +flusher threads will start writeback. Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only one of them may be specified at a time. When one sysctl is written it is @@ -89,7 +89,7 @@ other appears as 0 when read. dirty_background_ratio Contains, as a percentage of total system memory, the number of pages at which -the pdflush background writeback daemon will start writing out dirty data. +the background kernel flusher threads will start writing out dirty data. ============================================================== @@ -112,9 +112,9 @@ retained. dirty_expire_centisecs This tunable is used to define when dirty data is old enough to be eligible -for writeout by the pdflush daemons. It is expressed in 100'ths of a second. -Data which has been dirty in-memory for longer than this interval will be -written out next time a pdflush daemon wakes up. +for writeout by the kernel flusher threads. It is expressed in 100'ths +of a second. Data which has been dirty in-memory for longer than this +interval will be written out next time a flusher thread wakes up. ============================================================== @@ -128,7 +128,7 @@ data. dirty_writeback_centisecs -The pdflush writeback daemons will periodically wake up and write `old' data +The kernel flusher threads will periodically wake up and write `old' data out to disk. This tunable expresses the interval between those wakeups, in 100'ths of a second. diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt index f8551b3879f8..4ac359b7aa17 100644 --- a/Documentation/vm/hugetlbpage.txt +++ b/Documentation/vm/hugetlbpage.txt @@ -299,11 +299,17 @@ map_hugetlb.c. ******************************************************************* /* - * hugepage-shm: see Documentation/vm/hugepage-shm.c + * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c */ ******************************************************************* /* - * hugepage-mmap: see Documentation/vm/hugepage-mmap.c + * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c + */ + +******************************************************************* + +/* + * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c */ diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm index 0403aaaba878..874a8ca93feb 100644 --- a/Documentation/w1/slaves/w1_therm +++ b/Documentation/w1/slaves/w1_therm @@ -3,6 +3,7 @@ Kernel driver w1_therm Supported chips: * Maxim ds18*20 based temperature sensors. + * Maxim ds1825 based temperature sensors. Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru> @@ -15,6 +16,7 @@ supported family codes: W1_THERM_DS18S20 0x10 W1_THERM_DS1822 0x22 W1_THERM_DS18B20 0x28 +W1_THERM_DS1825 0x3B Support is provided through the sysfs w1_slave file. 
Each open and read sequence will initiate a temperature conversion then provide two diff --git a/MAINTAINERS b/MAINTAINERS index 6720018bc674..fdc0119963e7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -827,24 +827,24 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -869,7 +869,7 @@ F: drivers/pcmcia/pxa2xx_stargate2.c ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -925,14 +925,14 @@ S: Maintained ARM/NOMADIK ARCHITECTURE M: Alessandro Rubini <rubini@unipv.it> -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> M: STEricsson <STEricsson_nomadik_linux@list.st.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-nomadik/ F: arch/arm/plat-nomadik/ F: drivers/i2c/busses/i2c-nomadik.c -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT M: Nelson Castillo <arhuaco@freaks-unidos.net> @@ -1146,7 +1146,7 @@ F: drivers/usb/host/ehci-w90x900.c F: drivers/video/nuc900fb.c ARM/U300 MACHINE SUPPORT -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: arch/arm/mach-u300/ @@ -1161,15 +1161,20 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/Ux500 ARM ARCHITECTURE M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-ux500/ +F: drivers/clocksource/clksrc-dbx500-prcmu.c F: drivers/dma/ste_dma40* +F: drivers/hwspinlock/u8500_hsem.c F: drivers/mfd/abx500* F: drivers/mfd/ab8500* -F: drivers/mfd/stmpe* +F: drivers/mfd/dbx500* +F: drivers/mfd/db8500* +F: drivers/pinctrl/pinctrl-nomadik* F: drivers/rtc/rtc-ab8500.c +F: drivers/rtc/rtc-pl031.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/VFP SUPPORT @@ -1227,9 +1232,9 @@ S: Maintained F: drivers/hwmon/asb100.c ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> W: http://sourceforge.net/projects/xscaleiop -S: Supported +S: Maintained F: 
Documentation/crypto/async-tx-api.txt F: crypto/async_tx/ F: drivers/dma/ @@ -2212,7 +2217,7 @@ S: Maintained F: drivers/scsi/tmscsim.* DC395x SCSI driver -M: Oliver Neukum <oliver@neukum.name> +M: Oliver Neukum <oliver@neukum.org> M: Ali Akcaagac <aliakc@web.de> M: Jamie Lenehan <lenehan@twibble.org> W: http://twibble.org/dist/dc395x/ @@ -2359,7 +2364,7 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul <vinod.koul@intel.com> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> S: Supported F: drivers/dma/ F: include/linux/dma* @@ -3094,7 +3099,7 @@ F: include/linux/gigaset_dev.h GPIO SUBSYSTEM M: Grant Likely <grant.likely@secretlab.ca> -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> S: Maintained T: git git://git.secretlab.ca/git/linux-2.6.git F: Documentation/gpio.txt @@ -3547,7 +3552,6 @@ K: \b(ABS|SYN)_MT_ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support <intel-linux-scu@intel.com> -M: Dan Williams <dan.j.williams@intel.com> M: Dave Jiang <dave.jiang@intel.com> M: Ed Nadolski <edmund.nadolski@intel.com> L: linux-scsi@vger.kernel.org @@ -3590,8 +3594,8 @@ F: arch/x86/kernel/microcode_core.c F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER -M: Dan Williams <dan.j.williams@intel.com> -S: Supported +M: Dan Williams <djbw@fb.com> +S: Maintained F: drivers/dma/ioat* INTEL IOMMU (VT-d) @@ -3603,8 +3607,8 @@ F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER -M: Dan Williams <dan.j.williams@intel.com> -S: Maintained +M: Dan Williams <djbw@fb.com> +S: Odd fixes F: drivers/dma/iop-adma.c INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT @@ -4533,7 +4537,7 @@ S: Supported F: arch/microblaze/ MICROTEK X6 SCANNER -M: Oliver Neukum <oliver@neukum.name> +M: Oliver Neukum <oliver@neukum.org> S: Maintained F: drivers/usb/image/microtek.* @@ -5329,14 +5333,15 @@ PIN CONTROL SUBSYSTEM M: Linus Walleij <linus.walleij@linaro.org> S: Maintained F: drivers/pinctrl/ +F: include/linux/pinctrl/ PIN CONTROLLER - ST SPEAR -M: Viresh Kumar <viresh.linux@gmail.com> +M: Viresh Kumar <viresh.linux@gmail.com> L: spear-devel@list.st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: Maintained -F: driver/pinctrl/spear/ +F: drivers/pinctrl/spear/ PKTCDVD DRIVER M: Peter Osterlund <petero2@telia.com> @@ -7071,7 +7076,7 @@ F: include/linux/mtd/ubi.h F: include/mtd/ubi-user.h USB ACM DRIVER -M: Oliver Neukum <oliver@neukum.name> +M: Oliver Neukum <oliver@neukum.org> L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/acm.txt @@ -7092,7 +7097,7 @@ S: Supported F: drivers/block/ub.c USB CDC ETHERNET DRIVER -M: Oliver Neukum <oliver@neukum.name> +M: Oliver Neukum <oliver@neukum.org> L: linux-usb@vger.kernel.org S: Maintained F: drivers/net/usb/cdc_*.c @@ -7165,7 +7170,7 @@ F: drivers/usb/host/isp116x* F: include/linux/usb/isp116x.h USB KAWASAKI LSI DRIVER -M: Oliver Neukum <oliver@neukum.name> +M: Oliver Neukum <oliver@neukum.org> L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/serial/kl5kusb105.* @@ -7283,6 +7288,12 @@ W: http://www.connecttech.com S: Supported F: drivers/usb/serial/whiteheat* +USB SMSC75XX ETHERNET DRIVER +M: Steve Glendinning <steve.glendinning@shawell.net> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/usb/smsc75xx.* + USB SMSC95XX ETHERNET DRIVER M: Steve Glendinning <steve.glendinning@shawell.net> L: 
netdev@vger.kernel.org @@ -7382,6 +7393,7 @@ W: http://user-mode-linux.sourceforge.net S: Maintained F: Documentation/virtual/uml/ F: arch/um/ +F: arch/x86/um/ F: fs/hostfs/ F: fs/hppfs/ @@ -7664,23 +7676,28 @@ S: Supported F: Documentation/hwmon/wm83?? F: arch/arm/mach-s3c64xx/mach-crag6410* F: drivers/clk/clk-wm83*.c +F: drivers/extcon/extcon-arizona.c F: drivers/leds/leds-wm83*.c F: drivers/gpio/gpio-*wm*.c +F: drivers/gpio/gpio-arizona.c F: drivers/hwmon/wm83??-hwmon.c F: drivers/input/misc/wm831x-on.c F: drivers/input/touchscreen/wm831x-ts.c F: drivers/input/touchscreen/wm97*.c -F: drivers/mfd/wm8*.c +F: drivers/mfd/arizona* +F: drivers/mfd/wm*.c F: drivers/power/wm83*.c F: drivers/rtc/rtc-wm83*.c F: drivers/regulator/wm8*.c F: drivers/video/backlight/wm83*_bl.c F: drivers/watchdog/wm83*_wdt.c +F: include/linux/mfd/arizona/ F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ F: include/linux/mfd/wm8400* F: include/linux/wm97xx.h F: include/sound/wm????.h +F: sound/soc/codecs/arizona.? F: sound/soc/codecs/wm* WORKQUEUE @@ -1,7 +1,7 @@ VERSION = 3 -PATCHLEVEL = 5 +PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc2 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index d5b9b5e645cc..9944dedee5b1 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -18,6 +18,8 @@ config ALPHA select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_SMP_IDLE_THREAD select GENERIC_CMOS_UPDATE + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 3bb7ffeae3bc..c2cbe4fc391c 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -14,8 +14,8 @@ */ -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic64_read(v) (*(volatile long *)&(v)->counter) diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h index db00f7885faa..e477bcd5b94a 100644 --- a/arch/alpha/include/asm/fpu.h +++ b/arch/alpha/include/asm/fpu.h @@ -1,7 +1,9 @@ #ifndef __ASM_ALPHA_FPU_H #define __ASM_ALPHA_FPU_H +#ifdef __KERNEL__ #include <asm/special_insns.h> +#endif /* * Alpha floating-point control register defines: diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h index fd698a174f26..b87755a19554 100644 --- a/arch/alpha/include/asm/ptrace.h +++ b/arch/alpha/include/asm/ptrace.h @@ -76,7 +76,10 @@ struct switch_stack { #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) -#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) +#define current_pt_regs() \ + ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1) + +#define force_successful_syscall_return() (current_pt_regs()->r0 = 0) #endif diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index dcb221a4b5be..7d2f75be932e 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h @@ -76,9 +76,11 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#ifdef __KERNEL__ /* O_NONBLOCK clashes with the bits used for socket types. 
Therefore we * have to define SOCK_NONBLOCK to a different value here. */ #define SOCK_NONBLOCK 0x40000000 +#endif /* __KERNEL__ */ #endif /* _ASM_SOCKET_H */ diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index b49ec2f8d6e3..766fdfde2b7a 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -433,36 +433,12 @@ clear_user(void __user *to, long len) #undef __module_address #undef __module_call -/* Returns: -EFAULT if exception before terminator, N if the entire - buffer filled, else strlen. */ +#define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) -extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); - -extern inline long -strncpy_from_user(char *to, const char __user *from, long n) -{ - long ret = -EFAULT; - if (__access_ok((unsigned long)from, 0, get_fs())) - ret = __strncpy_from_user(to, from, n); - return ret; -} - -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ -extern long __strlen_user(const char __user *); - -extern inline long strlen_user(const char __user *str) -{ - return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; -} - -/* Returns: 0 if exception before NUL or reaching the supplied limit (N), - * a value greater than N if the limit would be exceeded, else strlen. */ -extern long __strnlen_user(const char __user *, long); - -extern inline long strnlen_user(const char __user *str, long n) -{ - return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; -} +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); /* * About the exception table: diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 633b23b0664a..a31a78eac9b9 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -465,10 +465,12 @@ #define __NR_setns 501 #define __NR_accept4 502 #define __NR_sendmmsg 503 +#define __NR_process_vm_readv 504 +#define __NR_process_vm_writev 505 #ifdef __KERNEL__ -#define NR_SYSCALLS 504 +#define NR_SYSCALLS 506 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6b340d0f1521 --- /dev/null +++ b/arch/alpha/include/asm/word-at-a-time.h @@ -0,0 +1,55 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include <asm/compiler.h> + +/* + * word-at-a-time interface for Alpha. + */ + +/* + * We do not use the word_at_a_time struct on Alpha, but it needs to be + * implemented to humour the generic code. 
+ */ +struct word_at_a_time { + const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long zero_locations = __kernel_cmpbge(0, val); + *bits = zero_locations; + return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) + /* Simple if have CIX instructions */ + return __kernel_cttz(bits); +#else + unsigned long t1, t2, t3; + /* Retain lowest set bit only */ + bits &= -bits; + /* Binary search for lowest set bit */ + t1 = bits & 0xf0; + t2 = bits & 0xcc; + t3 = bits & 0xaa; + if (t1) t1 = 4; + if (t2) t2 = 2; + if (t3) t3 = 1; + return t1 + t2 + t3; +#endif +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c index d96e742d4dc2..15fa821d09cd 100644 --- a/arch/alpha/kernel/alpha_ksyms.c +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s); /* entry.S */ EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(kernel_execve); /* Networking helper routines. */ EXPORT_SYMBOL(csum_tcpudp_magic); @@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul); */ EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__do_clear_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__strnlen_user); /* * SMP-specific symbols. diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index 6d159cee5f2f..ec0da0567ab5 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -663,58 +663,6 @@ kernel_thread: br ret_to_kernel .end kernel_thread -/* - * kernel_execve(path, argv, envp) - */ - .align 4 - .globl kernel_execve - .ent kernel_execve -kernel_execve: - /* We can be called from a module. */ - ldgp $gp, 0($27) - lda $sp, -(32+SIZEOF_PT_REGS+8)($sp) - .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0 - stq $26, 0($sp) - stq $16, 8($sp) - stq $17, 16($sp) - stq $18, 24($sp) - .prologue 1 - - lda $16, 32($sp) - lda $17, 0 - lda $18, SIZEOF_PT_REGS - bsr $26, memset !samegp - - /* Avoid the HAE being gratuitously wrong, which would cause us - to do the whole turn off interrupts thing and restore it. */ - ldq $2, alpha_mv+HAE_CACHE - stq $2, 152+32($sp) - - ldq $16, 8($sp) - ldq $17, 16($sp) - ldq $18, 24($sp) - lda $19, 32($sp) - bsr $26, do_execve !samegp - - ldq $26, 0($sp) - bne $0, 1f /* error! */ - - /* Move the temporary pt_regs struct from its current location - to the top of the kernel stack frame. See copy_thread for - details for a normal process. */ - lda $16, 0x4000 - SIZEOF_PT_REGS($8) - lda $17, 32($sp) - lda $18, SIZEOF_PT_REGS - bsr $26, memmove !samegp - - /* Take that over as our new stack frame and visit userland! */ - lda $sp, 0x4000 - SIZEOF_PT_REGS($8) - br $31, ret_from_sys_call - -1: lda $sp, 32+SIZEOF_PT_REGS+8($sp) - ret -.end kernel_execve - /* * Special system calls. 
Most of these are special in that they either @@ -797,115 +745,6 @@ sys_rt_sigreturn: .end sys_rt_sigreturn .align 4 - .globl sys_sethae - .ent sys_sethae -sys_sethae: - .prologue 0 - stq $16, 152($sp) - ret -.end sys_sethae - - .align 4 - .globl osf_getpriority - .ent osf_getpriority -osf_getpriority: - lda $sp, -16($sp) - stq $26, 0($sp) - .prologue 0 - - jsr $26, sys_getpriority - - ldq $26, 0($sp) - blt $0, 1f - - /* Return value is the unbiased priority, i.e. 20 - prio. - This does result in negative return values, so signal - no error by writing into the R0 slot. */ - lda $1, 20 - stq $31, 16($sp) - subl $1, $0, $0 - unop - -1: lda $sp, 16($sp) - ret -.end osf_getpriority - - .align 4 - .globl sys_getxuid - .ent sys_getxuid -sys_getxuid: - .prologue 0 - ldq $2, TI_TASK($8) - ldq $3, TASK_CRED($2) - ldl $0, CRED_UID($3) - ldl $1, CRED_EUID($3) - stq $1, 80($sp) - ret -.end sys_getxuid - - .align 4 - .globl sys_getxgid - .ent sys_getxgid -sys_getxgid: - .prologue 0 - ldq $2, TI_TASK($8) - ldq $3, TASK_CRED($2) - ldl $0, CRED_GID($3) - ldl $1, CRED_EGID($3) - stq $1, 80($sp) - ret -.end sys_getxgid - - .align 4 - .globl sys_getxpid - .ent sys_getxpid -sys_getxpid: - .prologue 0 - ldq $2, TI_TASK($8) - - /* See linux/kernel/timer.c sys_getppid for discussion - about this loop. */ - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - ldl $0, TASK_TGID($2) -1: ldl $1, TASK_TGID($4) -#ifdef CONFIG_SMP - mov $4, $5 - mb - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - cmpeq $4, $5, $5 - beq $5, 1b -#endif - stq $1, 80($sp) - ret -.end sys_getxpid - - .align 4 - .globl sys_alpha_pipe - .ent sys_alpha_pipe -sys_alpha_pipe: - lda $sp, -16($sp) - stq $26, 0($sp) - .prologue 0 - - mov $31, $17 - lda $16, 8($sp) - jsr $26, do_pipe_flags - - ldq $26, 0($sp) - bne $0, 1f - - /* The return values are in $0 and $20. */ - ldl $1, 12($sp) - ldl $0, 8($sp) - - stq $1, 80+16($sp) -1: lda $sp, 16($sp) - ret -.end sys_alpha_pipe - - .align 4 .globl sys_execve .ent sys_execve sys_execve: diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 98a103621af6..bc1acdda7a5e 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd, } #endif + +SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) +{ + int prio = sys_getpriority(which, who); + if (prio >= 0) { + /* Return value is the unbiased priority, i.e. 20 - prio. + This does result in negative return values, so signal + no error */ + force_successful_syscall_return(); + prio = 20 - prio; + } + return prio; +} + +SYSCALL_DEFINE0(getxuid) +{ + current_pt_regs()->r20 = sys_geteuid(); + return sys_getuid(); +} + +SYSCALL_DEFINE0(getxgid) +{ + current_pt_regs()->r20 = sys_getegid(); + return sys_getgid(); +} + +SYSCALL_DEFINE0(getxpid) +{ + current_pt_regs()->r20 = sys_getppid(); + return sys_getpid(); +} + +SYSCALL_DEFINE0(alpha_pipe) +{ + int fd[2]; + int res = do_pipe_flags(fd, 0); + if (!res) { + /* The return values are in $0 and $20. 
*/ + current_pt_regs()->r20 = fd[1]; + res = fd[0]; + } + return res; +} + +SYSCALL_DEFINE1(sethae, unsigned long, val) +{ + current_pt_regs()->hae = val; + return 0; +} diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 153d3fce3e8e..d6fde98b74b3 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -455,3 +455,22 @@ get_wchan(struct task_struct *p) } return pc; } + +int kernel_execve(const char *path, const char *const argv[], const char *const envp[]) +{ + /* Avoid the HAE being gratuitously wrong, which would cause us + to do the whole turn off interrupts thing and restore it. */ + struct pt_regs regs = {.hae = alpha_mv.hae_cache}; + int err = do_execve(path, argv, envp, ®s); + if (!err) { + struct pt_regs *p = current_pt_regs(); + /* copy regs to normal position and off to userland we go... */ + *p = regs; + __asm__ __volatile__ ( + "mov %0, $sp;" + "br $31, ret_from_sys_call" + : : "r"(p)); + } + return err; +} +EXPORT_SYMBOL(kernel_execve); diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 87835235f114..2ac6b45c3e00 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -111,7 +111,7 @@ sys_call_table: .quad sys_socket .quad sys_connect .quad sys_accept - .quad osf_getpriority /* 100 */ + .quad sys_osf_getpriority /* 100 */ .quad sys_send .quad sys_recv .quad sys_sigreturn @@ -522,6 +522,8 @@ sys_call_table: .quad sys_setns .quad sys_accept4 .quad sys_sendmmsg + .quad sys_process_vm_readv + .quad sys_process_vm_writev /* 505 */ .size sys_call_table, . - sys_call_table .type sys_call_table, @object diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index c0a83ab62b78..59660743237c 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ $(ev6-y)memchr.o \ $(ev6-y)copy_user.o \ $(ev6-y)clear_user.o \ - $(ev6-y)strncpy_from_user.o \ - $(ev67-y)strlen_user.o \ $(ev6-y)csum_ipv6_magic.o \ $(ev6-y)clear_page.o \ $(ev6-y)copy_page.o \ diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S deleted file mode 100644 index d2e28178cacc..000000000000 --- a/arch/alpha/lib/ev6-strncpy_from_user.S +++ /dev/null @@ -1,424 +0,0 @@ -/* - * arch/alpha/lib/ev6-strncpy_from_user.S - * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com> - * - * Just like strncpy except in the return value: - * - * -EFAULT if an exception occurs before the terminator is copied. - * N if the buffer filled. - * - * Otherwise the length of the string is returned. - * - * Much of the information about 21264 scheduling/coding comes from: - * Compiler Writer's Guide for the Alpha 21264 - * abbreviated as 'CWG' in other comments here - * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - * E - either cluster - * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * A bunch of instructions got moved and temp registers were changed - * to aid in scheduling. Control flow was also re-arranged to eliminate - * branches, and to provide longer code sequences to enable better scheduling. - * A total rewrite (using byte load/stores for start & tail sequences) - * is desirable, but very difficult to do without a from-scratch rewrite. - * Save that for the future. 
- */ - - -#include <asm/errno.h> -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda $31, $exception-99b($0); \ - .previous - - - .set noat - .set noreorder - .text - - .globl __strncpy_from_user - .ent __strncpy_from_user - .frame $30, 0, $26 - .prologue 0 - - .align 4 -__strncpy_from_user: - and a0, 7, t3 # E : find dest misalignment - beq a2, $zerolength # U : - - /* Are source and destination co-aligned? */ - mov a0, v0 # E : save the string start - xor a0, a1, t4 # E : - EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword - ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword - - addq a2, t3, a2 # E : bias count by dest misalignment - subq a2, 1, a3 # E : - addq zero, 1, t10 # E : - and t4, 7, t4 # E : misalignment between the two - - and a3, 7, t6 # E : number of tail bytes - sll t10, t6, t10 # E : t10 = bitmask of last count byte - bne t4, $unaligned # U : - lda t2, -1 # E : build a mask against false zero - - /* - * We are co-aligned; take care of a partial first word. - * On entry to this basic block: - * t0 == the first destination word for masking back in - * t1 == the first source word. - */ - - srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8 - addq a1, 8, a1 # E : - mskqh t2, a1, t2 # U : detection in the src word - nop - - /* Create the 1st output word and detect 0's in the 1st input word. */ - mskqh t1, a1, t3 # U : - mskql t0, a1, t0 # U : assemble the first output word - ornot t1, t2, t2 # E : - nop - - cmpbge zero, t2, t8 # E : bits set iff null found - or t0, t3, t0 # E : - beq a2, $a_eoc # U : - bne t8, $a_eos # U : 2nd branch in a quad. Bad. - - /* On entry to this basic block: - * t0 == a source quad not containing a null. - * a0 - current aligned destination address - * a1 - current aligned source address - * a2 - count of quadwords to move. - * NOTE: Loop improvement - unrolling this is going to be - * a huge win, since we're going to stall otherwise. - * Fix this later. For _really_ large copies, look - * at using wh64 on a look-ahead basis. See the code - * in clear_user.S and copy_user.S. - * Presumably, since (a0) and (a1) do not overlap (by C definition) - * Lots of nops here: - * - Separate loads from stores - * - Keep it to 1 branch/quadpack so the branch predictor - * can train. - */ -$a_loop: - stq_u t0, 0(a0) # L : - addq a0, 8, a0 # E : - nop - subq a2, 1, a2 # E : - - EX( ldq_u t0, 0(a1) ) # L : - addq a1, 8, a1 # E : - cmpbge zero, t0, t8 # E : Stall 2 cycles on t0 - beq a2, $a_eoc # U : - - beq t8, $a_loop # U : - nop - nop - nop - - /* Take care of the final (partial) word store. At this point - * the end-of-count bit is set in t8 iff it applies. - * - * On entry to this basic block we have: - * t0 == the source word containing the null - * t8 == the cmpbge mask that found it. - */ -$a_eos: - negq t8, t12 # E : find low bit set - and t8, t12, t12 # E : - - /* We're doing a partial word store and so need to combine - our source and original destination words. */ - ldq_u t1, 0(a0) # L : - subq t12, 1, t6 # E : - - or t12, t6, t8 # E : - zapnot t0, t8, t0 # U : clear src bytes > null - zap t1, t8, t1 # U : clear dst bytes <= null - or t0, t1, t0 # E : - - stq_u t0, 0(a0) # L : - br $finish_up # L0 : - nop - nop - - /* Add the end-of-count bit to the eos detection bitmask. */ - .align 4 -$a_eoc: - or t10, t8, t8 - br $a_eos - nop - nop - - -/* The source and destination are not co-aligned. 
Align the destination - and cope. We have to be very careful about not reading too much and - causing a SEGV. */ - - .align 4 -$u_head: - /* We know just enough now to be able to assemble the first - full source word. We can still find a zero at the end of it - that prevents us from outputting the whole thing. - - On entry to this basic block: - t0 == the first dest word, unmasked - t1 == the shifted low bits of the first source word - t6 == bytemask that is -1 in dest word bytes */ - - EX( ldq_u t2, 8(a1) ) # L : load second src word - addq a1, 8, a1 # E : - mskql t0, a0, t0 # U : mask trailing garbage in dst - extqh t2, a1, t4 # U : - - or t1, t4, t1 # E : first aligned src word complete - mskqh t1, a0, t1 # U : mask leading garbage in src - or t0, t1, t0 # E : first output word complete - or t0, t6, t6 # E : mask original data for zero test - - cmpbge zero, t6, t8 # E : - beq a2, $u_eocfin # U : - bne t8, $u_final # U : bad news - 2nd branch in a quad - lda t6, -1 # E : mask out the bits we have - - mskql t6, a1, t6 # U : already seen - stq_u t0, 0(a0) # L : store first output word - or t6, t2, t2 # E : - cmpbge zero, t2, t8 # E : find nulls in second partial - - addq a0, 8, a0 # E : - subq a2, 1, a2 # E : - bne t8, $u_late_head_exit # U : - nop - - /* Finally, we've got all the stupid leading edge cases taken care - of and we can set up to enter the main loop. */ - - extql t2, a1, t1 # U : position hi-bits of lo word - EX( ldq_u t2, 8(a1) ) # L : read next high-order source word - addq a1, 8, a1 # E : - cmpbge zero, t2, t8 # E : - - beq a2, $u_eoc # U : - bne t8, $u_eos # U : - nop - nop - - /* Unaligned copy main loop. In order to avoid reading too much, - the loop is structured to detect zeros in aligned source words. - This has, unfortunately, effectively pulled half of a loop - iteration out into the head and half into the tail, but it does - prevent nastiness from accumulating in the very thing we want - to run as fast as possible. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word - - We further know that t2 does not contain a null terminator. */ - - /* - * Extra nops here: - * separate load quads from store quads - * only one branch/quad to permit predictor training - */ - - .align 4 -$u_loop: - extqh t2, a1, t0 # U : extract high bits for current word - addq a1, 8, a1 # E : - extql t2, a1, t3 # U : extract low bits for next time - addq a0, 8, a0 # E : - - or t0, t1, t0 # E : current dst word now complete - EX( ldq_u t2, 0(a1) ) # L : load high word for next time - subq a2, 1, a2 # E : - nop - - stq_u t0, -8(a0) # L : save the current word - mov t3, t1 # E : - cmpbge zero, t2, t8 # E : test new word for eos - beq a2, $u_eoc # U : - - beq t8, $u_loop # U : - nop - nop - nop - - /* We've found a zero somewhere in the source word we just read. - If it resides in the lower half, we have one (probably partial) - word to write out, and if it resides in the upper half, we - have one full and one partial word left to write out. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word. */ - .align 4 -$u_eos: - extqh t2, a1, t0 # U : - or t0, t1, t0 # E : first (partial) source word complete - cmpbge zero, t0, t8 # E : is the null in this first bit? 
- nop - - bne t8, $u_final # U : - stq_u t0, 0(a0) # L : the null was in the high-order bits - addq a0, 8, a0 # E : - subq a2, 1, a2 # E : - - .align 4 -$u_late_head_exit: - extql t2, a1, t0 # U : - cmpbge zero, t0, t8 # E : - or t8, t10, t6 # E : - cmoveq a2, t6, t8 # E : - - /* Take care of a final (probably partial) result word. - On entry to this basic block: - t0 == assembled source word - t8 == cmpbge mask that found the null. */ - .align 4 -$u_final: - negq t8, t6 # E : isolate low bit set - and t6, t8, t12 # E : - ldq_u t1, 0(a0) # L : - subq t12, 1, t6 # E : - - or t6, t12, t8 # E : - zapnot t0, t8, t0 # U : kill source bytes > null - zap t1, t8, t1 # U : kill dest bytes <= null - or t0, t1, t0 # E : - - stq_u t0, 0(a0) # E : - br $finish_up # U : - nop - nop - - .align 4 -$u_eoc: # end-of-count - extqh t2, a1, t0 # U : - or t0, t1, t0 # E : - cmpbge zero, t0, t8 # E : - nop - - .align 4 -$u_eocfin: # end-of-count, final word - or t10, t8, t8 # E : - br $u_final # U : - nop - nop - - /* Unaligned copy entry point. */ - .align 4 -$unaligned: - - srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8 - and a0, 7, t4 # E : find dest misalignment - and a1, 7, t5 # E : find src misalignment - mov zero, t0 # E : - - /* Conditionally load the first destination word and a bytemask - with 0xff indicating that the destination byte is sacrosanct. */ - - mov zero, t6 # E : - beq t4, 1f # U : - ldq_u t0, 0(a0) # L : - lda t6, -1 # E : - - mskql t6, a0, t6 # E : - nop - nop - nop - - .align 4 -1: - subq a1, t4, a1 # E : sub dest misalignment from src addr - /* If source misalignment is larger than dest misalignment, we need - extra startup checks to avoid SEGV. */ - cmplt t4, t5, t12 # E : - extql t1, a1, t1 # U : shift src into place - lda t2, -1 # E : for creating masks later - - beq t12, $u_head # U : - mskqh t2, t5, t2 # U : begin src byte validity mask - cmpbge zero, t1, t8 # E : is there a zero? - nop - - extql t2, a1, t2 # U : - or t8, t10, t5 # E : test for end-of-count too - cmpbge zero, t2, t3 # E : - cmoveq a2, t5, t8 # E : Latency=2, extra map slot - - nop # E : goes with cmov - andnot t8, t3, t8 # E : - beq t8, $u_head # U : - nop - - /* At this point we've found a zero in the first partial word of - the source. We need to isolate the valid source data and mask - it into the original destination data. (Incidentally, we know - that we'll need at least one byte of that original dest word.) */ - - ldq_u t0, 0(a0) # L : - negq t8, t6 # E : build bitmask of bytes <= zero - mskqh t1, t4, t1 # U : - and t6, t8, t12 # E : - - subq t12, 1, t6 # E : - or t6, t12, t8 # E : - zapnot t2, t8, t2 # U : prepare source word; mirror changes - zapnot t1, t8, t1 # U : to source validity mask - - andnot t0, t2, t0 # E : zero place for source to reside - or t0, t1, t0 # E : and put it there - stq_u t0, 0(a0) # L : - nop - - .align 4 -$finish_up: - zapnot t0, t12, t4 # U : was last byte written null? 
- and t12, 0xf0, t3 # E : binary search for the address of the - cmovne t4, 1, t4 # E : Latency=2, extra map slot - nop # E : with cmovne - - and t12, 0xcc, t2 # E : last byte written - and t12, 0xaa, t1 # E : - cmovne t3, 4, t3 # E : Latency=2, extra map slot - nop # E : with cmovne - - bic a0, 7, t0 - cmovne t2, 2, t2 # E : Latency=2, extra map slot - nop # E : with cmovne - nop - - cmovne t1, 1, t1 # E : Latency=2, extra map slot - nop # E : with cmovne - addq t0, t3, t0 # E : - addq t1, t2, t1 # E : - - addq t0, t1, t0 # E : - addq t0, t4, t0 # add one if we filled the buffer - subq t0, v0, v0 # find string length - ret # L0 : - - .align 4 -$zerolength: - nop - nop - nop - clr v0 - -$exception: - nop - nop - nop - ret - - .end __strncpy_from_user diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S deleted file mode 100644 index 57e0d77b81a6..000000000000 --- a/arch/alpha/lib/ev67-strlen_user.S +++ /dev/null @@ -1,107 +0,0 @@ -/* - * arch/alpha/lib/ev67-strlen_user.S - * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com> - * - * Return the length of the string including the NULL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - * - * Much of the information about 21264 scheduling/coding comes from: - * Compiler Writer's Guide for the Alpha 21264 - * abbreviated as 'CWG' in other comments here - * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - * E - either cluster - * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * Try not to change the actual algorithm if possible for consistency. - */ - -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) 
\ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda v0, $exception-99b(zero); \ - .previous - - - .set noreorder - .set noat - .text - - .globl __strlen_user - .ent __strlen_user - .frame sp, 0, ra - - .align 4 -__strlen_user: - ldah a1, 32767(zero) # do not use plain strlen_user() for strings - # that might be almost 2 GB long; you should - # be using strnlen_user() instead - nop - nop - nop - - .globl __strnlen_user - - .align 4 -__strnlen_user: - .prologue 0 - EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned) - lda t1, -1(zero) # E : - - insqh t1, a0, t1 # U : - andnot a0, 7, v0 # E : - or t1, t0, t0 # E : - subq a0, 1, a0 # E : get our +1 for the return - - cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0 - subq a1, 7, t2 # E : - subq a0, v0, t0 # E : - bne t1, $found # U : - - addq t2, t0, t2 # E : - addq a1, 1, a1 # E : - nop # E : - nop # E : - - .align 4 -$loop: ble t2, $limit # U : - EX( ldq t0, 8(v0) ) # L : - nop # E : - nop # E : - - cmpbge zero, t0, t1 # E : - subq t2, 8, t2 # E : - addq v0, 8, v0 # E : addr += 8 - beq t1, $loop # U : - -$found: cttz t1, t2 # U0 : - addq v0, t2, v0 # E : - subq v0, a0, v0 # E : - ret # L0 : - -$exception: - nop - nop - nop - ret - - .align 4 # currently redundant -$limit: - nop - nop - subq a1, t2, v0 - ret - - .end __strlen_user diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S deleted file mode 100644 index 508a18e96479..000000000000 --- a/arch/alpha/lib/strlen_user.S +++ /dev/null @@ -1,91 +0,0 @@ -/* - * arch/alpha/lib/strlen_user.S - * - * Return the length of the string including the NUL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - */ - -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) 
\ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda v0, $exception-99b(zero); \ - .previous - - - .set noreorder - .set noat - .text - - .globl __strlen_user - .ent __strlen_user - .frame sp, 0, ra - - .align 3 -__strlen_user: - ldah a1, 32767(zero) # do not use plain strlen_user() for strings - # that might be almost 2 GB long; you should - # be using strnlen_user() instead - - .globl __strnlen_user - - .align 3 -__strnlen_user: - .prologue 0 - - EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned) - lda t1, -1(zero) - insqh t1, a0, t1 - andnot a0, 7, v0 - or t1, t0, t0 - subq a0, 1, a0 # get our +1 for the return - cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0 - subq a1, 7, t2 - subq a0, v0, t0 - bne t1, $found - - addq t2, t0, t2 - addq a1, 1, a1 - - .align 3 -$loop: ble t2, $limit - EX( ldq t0, 8(v0) ) - subq t2, 8, t2 - addq v0, 8, v0 # addr += 8 - cmpbge zero, t0, t1 - beq t1, $loop - -$found: negq t1, t2 # clear all but least set bit - and t1, t2, t1 - - and t1, 0xf0, t2 # binary search for that set bit - and t1, 0xcc, t3 - and t1, 0xaa, t4 - cmovne t2, 4, t2 - cmovne t3, 2, t3 - cmovne t4, 1, t4 - addq t2, t3, t2 - addq v0, t4, v0 - addq v0, t2, v0 - nop # dual issue next two on ev4 and ev5 - subq v0, a0, v0 -$exception: - ret - - .align 3 # currently redundant -$limit: - subq a1, t2, v0 - ret - - .end __strlen_user diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S deleted file mode 100644 index 73ee21160ff7..000000000000 --- a/arch/alpha/lib/strncpy_from_user.S +++ /dev/null @@ -1,339 +0,0 @@ -/* - * arch/alpha/lib/strncpy_from_user.S - * Contributed by Richard Henderson (rth@tamu.edu) - * - * Just like strncpy except in the return value: - * - * -EFAULT if an exception occurs before the terminator is copied. - * N if the buffer filled. - * - * Otherwise the length of the string is returned. - */ - - -#include <asm/errno.h> -#include <asm/regdef.h> - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda $31, $exception-99b($0); \ - .previous - - - .set noat - .set noreorder - .text - - .globl __strncpy_from_user - .ent __strncpy_from_user - .frame $30, 0, $26 - .prologue 0 - - .align 3 -$aligned: - /* On entry to this basic block: - t0 == the first destination word for masking back in - t1 == the first source word. */ - - /* Create the 1st output word and detect 0's in the 1st input word. */ - lda t2, -1 # e1 : build a mask against false zero - mskqh t2, a1, t2 # e0 : detection in the src word - mskqh t1, a1, t3 # e0 : - ornot t1, t2, t2 # .. e1 : - mskql t0, a1, t0 # e0 : assemble the first output word - cmpbge zero, t2, t8 # .. e1 : bits set iff null found - or t0, t3, t0 # e0 : - beq a2, $a_eoc # .. e1 : - bne t8, $a_eos # .. e1 : - - /* On entry to this basic block: - t0 == a source word not containing a null. */ - -$a_loop: - stq_u t0, 0(a0) # e0 : - addq a0, 8, a0 # .. e1 : - EX( ldq_u t0, 0(a1) ) # e0 : - addq a1, 8, a1 # .. e1 : - subq a2, 1, a2 # e0 : - cmpbge zero, t0, t8 # .. e1 (stall) - beq a2, $a_eoc # e1 : - beq t8, $a_loop # e1 : - - /* Take care of the final (partial) word store. At this point - the end-of-count bit is set in t8 iff it applies. - - On entry to this basic block we have: - t0 == the source word containing the null - t8 == the cmpbge mask that found it. 
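The removed routines also have to turn the isolated low bit of such a cmpbge mask back into a byte index: strlen_user.S and the strncpy $finish_up blocks do it with the 0xf0/0xcc/0xaa masks and conditional moves seen here, while the EV67 variant gets the same answer from a single cttz. A C sketch of the three-step binary search, assuming exactly one of bits 0..7 is set (illustrative only):

static unsigned int nul_byte_index(unsigned int bit)
{
	unsigned int idx = 0;

	if (bit & 0xf0)		/* the set bit lies in bytes 4..7 */
		idx += 4;
	if (bit & 0xcc)		/* ... in bytes 2, 3, 6 or 7 */
		idx += 2;
	if (bit & 0xaa)		/* ... in an odd byte */
		idx += 1;
	return idx;		/* byte offset 0..7 of the NUL */
}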
*/ - -$a_eos: - negq t8, t12 # e0 : find low bit set - and t8, t12, t12 # e1 (stall) - - /* For the sake of the cache, don't read a destination word - if we're not going to need it. */ - and t12, 0x80, t6 # e0 : - bne t6, 1f # .. e1 (zdb) - - /* We're doing a partial word store and so need to combine - our source and original destination words. */ - ldq_u t1, 0(a0) # e0 : - subq t12, 1, t6 # .. e1 : - or t12, t6, t8 # e0 : - unop # - zapnot t0, t8, t0 # e0 : clear src bytes > null - zap t1, t8, t1 # .. e1 : clear dst bytes <= null - or t0, t1, t0 # e1 : - -1: stq_u t0, 0(a0) - br $finish_up - - /* Add the end-of-count bit to the eos detection bitmask. */ -$a_eoc: - or t10, t8, t8 - br $a_eos - - /*** The Function Entry Point ***/ - .align 3 -__strncpy_from_user: - mov a0, v0 # save the string start - beq a2, $zerolength - - /* Are source and destination co-aligned? */ - xor a0, a1, t1 # e0 : - and a0, 7, t0 # .. e1 : find dest misalignment - and t1, 7, t1 # e0 : - addq a2, t0, a2 # .. e1 : bias count by dest misalignment - subq a2, 1, a2 # e0 : - and a2, 7, t2 # e1 : - srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8 - addq zero, 1, t10 # .. e1 : - sll t10, t2, t10 # e0 : t10 = bitmask of last count byte - bne t1, $unaligned # .. e1 : - - /* We are co-aligned; take care of a partial first word. */ - - EX( ldq_u t1, 0(a1) ) # e0 : load first src word - addq a1, 8, a1 # .. e1 : - - beq t0, $aligned # avoid loading dest word if not needed - ldq_u t0, 0(a0) # e0 : - br $aligned # .. e1 : - - -/* The source and destination are not co-aligned. Align the destination - and cope. We have to be very careful about not reading too much and - causing a SEGV. */ - - .align 3 -$u_head: - /* We know just enough now to be able to assemble the first - full source word. We can still find a zero at the end of it - that prevents us from outputting the whole thing. - - On entry to this basic block: - t0 == the first dest word, unmasked - t1 == the shifted low bits of the first source word - t6 == bytemask that is -1 in dest word bytes */ - - EX( ldq_u t2, 8(a1) ) # e0 : load second src word - addq a1, 8, a1 # .. e1 : - mskql t0, a0, t0 # e0 : mask trailing garbage in dst - extqh t2, a1, t4 # e0 : - or t1, t4, t1 # e1 : first aligned src word complete - mskqh t1, a0, t1 # e0 : mask leading garbage in src - or t0, t1, t0 # e0 : first output word complete - or t0, t6, t6 # e1 : mask original data for zero test - cmpbge zero, t6, t8 # e0 : - beq a2, $u_eocfin # .. e1 : - bne t8, $u_final # e1 : - - lda t6, -1 # e1 : mask out the bits we have - mskql t6, a1, t6 # e0 : already seen - stq_u t0, 0(a0) # e0 : store first output word - or t6, t2, t2 # .. e1 : - cmpbge zero, t2, t8 # e0 : find nulls in second partial - addq a0, 8, a0 # .. e1 : - subq a2, 1, a2 # e0 : - bne t8, $u_late_head_exit # .. e1 : - - /* Finally, we've got all the stupid leading edge cases taken care - of and we can set up to enter the main loop. */ - - extql t2, a1, t1 # e0 : position hi-bits of lo word - EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word - addq a1, 8, a1 # e0 : - cmpbge zero, t2, t8 # e1 (stall) - beq a2, $u_eoc # e1 : - bne t8, $u_eos # e1 : - - /* Unaligned copy main loop. In order to avoid reading too much, - the loop is structured to detect zeros in aligned source words. - This has, unfortunately, effectively pulled half of a loop - iteration out into the head and half into the tail, but it does - prevent nastiness from accumulating in the very thing we want - to run as fast as possible. 
- - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word - - We further know that t2 does not contain a null terminator. */ - - .align 3 -$u_loop: - extqh t2, a1, t0 # e0 : extract high bits for current word - addq a1, 8, a1 # .. e1 : - extql t2, a1, t3 # e0 : extract low bits for next time - addq a0, 8, a0 # .. e1 : - or t0, t1, t0 # e0 : current dst word now complete - EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time - stq_u t0, -8(a0) # e0 : save the current word - mov t3, t1 # .. e1 : - subq a2, 1, a2 # e0 : - cmpbge zero, t2, t8 # .. e1 : test new word for eos - beq a2, $u_eoc # e1 : - beq t8, $u_loop # e1 : - - /* We've found a zero somewhere in the source word we just read. - If it resides in the lower half, we have one (probably partial) - word to write out, and if it resides in the upper half, we - have one full and one partial word left to write out. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word. */ -$u_eos: - extqh t2, a1, t0 # e0 : - or t0, t1, t0 # e1 : first (partial) source word complete - - cmpbge zero, t0, t8 # e0 : is the null in this first bit? - bne t8, $u_final # .. e1 (zdb) - - stq_u t0, 0(a0) # e0 : the null was in the high-order bits - addq a0, 8, a0 # .. e1 : - subq a2, 1, a2 # e1 : - -$u_late_head_exit: - extql t2, a1, t0 # .. e0 : - cmpbge zero, t0, t8 # e0 : - or t8, t10, t6 # e1 : - cmoveq a2, t6, t8 # e0 : - nop # .. e1 : - - /* Take care of a final (probably partial) result word. - On entry to this basic block: - t0 == assembled source word - t8 == cmpbge mask that found the null. */ -$u_final: - negq t8, t6 # e0 : isolate low bit set - and t6, t8, t12 # e1 : - - and t12, 0x80, t6 # e0 : avoid dest word load if we can - bne t6, 1f # .. e1 (zdb) - - ldq_u t1, 0(a0) # e0 : - subq t12, 1, t6 # .. e1 : - or t6, t12, t8 # e0 : - zapnot t0, t8, t0 # .. e1 : kill source bytes > null - zap t1, t8, t1 # e0 : kill dest bytes <= null - or t0, t1, t0 # e1 : - -1: stq_u t0, 0(a0) # e0 : - br $finish_up - -$u_eoc: # end-of-count - extqh t2, a1, t0 - or t0, t1, t0 - cmpbge zero, t0, t8 - -$u_eocfin: # end-of-count, final word - or t10, t8, t8 - br $u_final - - /* Unaligned copy entry point. */ - .align 3 -$unaligned: - - EX( ldq_u t1, 0(a1) ) # e0 : load first source word - - and a0, 7, t4 # .. e1 : find dest misalignment - and a1, 7, t5 # e0 : find src misalignment - - /* Conditionally load the first destination word and a bytemask - with 0xff indicating that the destination byte is sacrosanct. */ - - mov zero, t0 # .. e1 : - mov zero, t6 # e0 : - beq t4, 1f # .. e1 : - ldq_u t0, 0(a0) # e0 : - lda t6, -1 # .. e1 : - mskql t6, a0, t6 # e0 : -1: - subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr - - /* If source misalignment is larger than dest misalignment, we need - extra startup checks to avoid SEGV. */ - - cmplt t4, t5, t12 # e1 : - extql t1, a1, t1 # .. e0 : shift src into place - lda t2, -1 # e0 : for creating masks later - beq t12, $u_head # e1 : - - mskqh t2, t5, t2 # e0 : begin src byte validity mask - cmpbge zero, t1, t8 # .. e1 : is there a zero? - extql t2, a1, t2 # e0 : - or t8, t10, t5 # .. e1 : test for end-of-count too - cmpbge zero, t2, t3 # e0 : - cmoveq a2, t5, t8 # .. e1 : - andnot t8, t3, t8 # e0 : - beq t8, $u_head # .. e1 (zdb) - - /* At this point we've found a zero in the first partial word of - the source. 
We need to isolate the valid source data and mask - it into the original destination data. (Incidentally, we know - that we'll need at least one byte of that original dest word.) */ - - ldq_u t0, 0(a0) # e0 : - negq t8, t6 # .. e1 : build bitmask of bytes <= zero - mskqh t1, t4, t1 # e0 : - and t6, t8, t12 # .. e1 : - subq t12, 1, t6 # e0 : - or t6, t12, t8 # e1 : - - zapnot t2, t8, t2 # e0 : prepare source word; mirror changes - zapnot t1, t8, t1 # .. e1 : to source validity mask - - andnot t0, t2, t0 # e0 : zero place for source to reside - or t0, t1, t0 # e1 : and put it there - stq_u t0, 0(a0) # e0 : - -$finish_up: - zapnot t0, t12, t4 # was last byte written null? - cmovne t4, 1, t4 - - and t12, 0xf0, t3 # binary search for the address of the - and t12, 0xcc, t2 # last byte written - and t12, 0xaa, t1 - bic a0, 7, t0 - cmovne t3, 4, t3 - cmovne t2, 2, t2 - cmovne t1, 1, t1 - addq t0, t3, t0 - addq t1, t2, t1 - addq t0, t1, t0 - addq t0, t4, t0 # add one if we filled the buffer - - subq t0, v0, v0 # find string length - ret - -$zerolength: - clr v0 -$exception: - ret - - .end __strncpy_from_user diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 5eecab1a84ef..0c4132dd3507 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr, const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; siginfo_t info; + unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + (cause > 0 ? FAULT_FLAG_WRITE : 0)); /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs @@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto vmalloc_fault; #endif +retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) @@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ - fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0); - up_read(&mm->mmap_sem); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) + current->maj_flt++; + else + current->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + } + + up_read(&mm->mmap_sem); + return; /* Something tried to access memory that isn't in our memory map. @@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: + up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: + up_read(&mm->mmap_sem); /* Send a sigbus, regardless of whether we were in kernel or user mode. 
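The fault.c hunks above convert alpha's page fault handler to the retrying scheme: the first call to handle_mm_fault() carries FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_KILLABLE, and a VM_FAULT_RETRY result means mmap_sem was already dropped by __lock_page_or_retry(), so the handler clears the retry flag and tries once more. A condensed sketch of that control flow, with the access checks, fault accounting and error paths of the real handler omitted:

#include <linux/mm.h>
#include <linux/sched.h>

static void fault_with_retry(struct mm_struct *mm, unsigned long address,
			     int is_write)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (is_write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	/* ... vma bounds and permission checks go here ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* A fatal signal can interrupt the wait; mmap_sem is already
	 * released on this path, so just bail out. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		/* __lock_page_or_retry() dropped mmap_sem; retry exactly
		 * once, this time without the retry flag. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
}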
*/ info.si_signo = SIGBUS; diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c index a0a5d27aa215..b8ce18f485d3 100644 --- a/arch/alpha/oprofile/common.c +++ b/arch/alpha/oprofile/common.c @@ -12,6 +12,7 @@ #include <linux/smp.h> #include <linux/errno.h> #include <asm/ptrace.h> +#include <asm/special_insns.h> #include "op_impl.h" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 7980873525b2..6d6e18fee9fe 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -38,7 +38,6 @@ config ARM select HARDIRQS_SW_RESEND select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_IRQ_PROBE select ARCH_WANT_IPC_PARSE_VERSION select HARDIRQS_SW_RESEND select CPU_PM if (SUSPEND || CPU_IDLE) @@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT bool default y -config GENERIC_LOCKBREAK - bool - default y - depends on SMP && PREEMPT - config RWSEM_GENERIC_SPINLOCK bool default y @@ -1151,6 +1145,7 @@ config PLAT_ORION bool select CLKSRC_MMIO select GENERIC_IRQ_CHIP + select IRQ_DOMAIN select COMMON_CLK config PLAT_PXA diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi index e1fa7e6edfe8..71d6b5d0daf1 100644 --- a/arch/arm/boot/dts/armada-xp.dtsi +++ b/arch/arm/boot/dts/armada-xp.dtsi @@ -12,7 +12,7 @@ * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * - * Contains definitions specific to the Armada 370 SoC that are not + * Contains definitions specific to the Armada XP SoC that are not * common to all Armada SoCs. */ diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index a874dbfb5ae6..e6138310e5ce 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -51,11 +51,11 @@ dma-apbh@80004000 { compatible = "fsl,imx23-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; ecc@80008000 { - reg = <0x80008000 2000>; + reg = <0x80008000 0x2000>; status = "disabled"; }; @@ -63,7 +63,7 @@ compatible = "fsl,imx23-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <13>, <56>; interrupt-names = "gpmi-dma", "bch"; @@ -72,14 +72,14 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <15 14>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; etm@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; status = "disabled"; }; @@ -87,7 +87,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx23-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx23-gpio", "fsl,mxs-gpio"; @@ -273,32 +273,32 @@ }; emi@80020000 { - reg = <0x80020000 2000>; + reg = <0x80020000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx23-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; @@ -310,14 +310,14 @@ }; ssp1: ssp@80034000 { - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <2 20>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; tvenc@80038000 { 
- reg = <0x80038000 2000>; + reg = <0x80038000 0x2000>; status = "disabled"; }; }; @@ -330,37 +330,37 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; status = "disabled"; }; audio-out@80048000 { - reg = <0x80048000 2000>; + reg = <0x80048000 0x2000>; status = "disabled"; }; audio-in@8004c000 { - reg = <0x8004c000 2000>; + reg = <0x8004c000 0x2000>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; @@ -370,26 +370,26 @@ }; i2c@80058000 { - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; status = "disabled"; }; rtc@8005c000 { compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x8005c000 2000>; + reg = <0x8005c000 0x2000>; interrupts = <22>; }; pwm: pwm@80064000 { compatible = "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <5>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; @@ -429,7 +429,7 @@ ranges; usbctrl@80080000 { - reg = <0x80080000 0x10000>; + reg = <0x80080000 0x40000>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts index d3f8296e19e0..0a8978a40ece 100644 --- a/arch/arm/boot/dts/imx27-3ds.dts +++ b/arch/arm/boot/dts/imx27-3ds.dts @@ -27,7 +27,7 @@ status = "okay"; }; - uart@1000a000 { + uart1: serial@1000a000 { fsl,uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 00bae3aad5ab..5303ab680a34 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -19,6 +19,12 @@ serial3 = &uart4; serial4 = &uart5; serial5 = &uart6; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; }; avic: avic-interrupt-controller@e0000000 { diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 787efac68da8..3fa6d190fab4 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -57,18 +57,18 @@ }; hsadc@80002000 { - reg = <0x80002000 2000>; + reg = <0x80002000 0x2000>; interrupts = <13 87>; status = "disabled"; }; dma-apbh@80004000 { compatible = "fsl,imx28-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; perfmon@80006000 { - reg = <0x80006000 800>; + reg = <0x80006000 0x800>; interrupts = <27>; status = "disabled"; }; @@ -77,7 +77,7 @@ compatible = "fsl,imx28-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <88>, <41>; interrupt-names = "gpmi-dma", "bch"; @@ -86,28 +86,28 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <96 82>; fsl,ssp-dma-channel = <0>; status = "disabled"; }; ssp1: ssp@80012000 { - reg = <0x80012000 2000>; + reg = <0x80012000 0x2000>; interrupts = <97 83>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; ssp2: ssp@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; interrupts = <98 84>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; ssp3: ssp@80016000 { - reg = 
<0x80016000 2000>; + reg = <0x80016000 0x2000>; interrupts = <99 85>; fsl,ssp-dma-channel = <3>; status = "disabled"; @@ -117,7 +117,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx28-gpio", "fsl,mxs-gpio"; @@ -510,96 +510,96 @@ }; digctl@8001c000 { - reg = <0x8001c000 2000>; + reg = <0x8001c000 0x2000>; interrupts = <89>; status = "disabled"; }; etm@80022000 { - reg = <0x80022000 2000>; + reg = <0x80022000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx28-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; interrupts = <52 53 54>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; interrupts = <39>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; lcdif@80030000 { compatible = "fsl,imx28-lcdif"; - reg = <0x80030000 2000>; + reg = <0x80030000 0x2000>; interrupts = <38 86>; status = "disabled"; }; can0: can@80032000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80032000 2000>; + reg = <0x80032000 0x2000>; interrupts = <8>; status = "disabled"; }; can1: can@80034000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <9>; status = "disabled"; }; simdbg@8003c000 { - reg = <0x8003c000 200>; + reg = <0x8003c000 0x200>; status = "disabled"; }; simgpmisel@8003c200 { - reg = <0x8003c200 100>; + reg = <0x8003c200 0x100>; status = "disabled"; }; simsspsel@8003c300 { - reg = <0x8003c300 100>; + reg = <0x8003c300 0x100>; status = "disabled"; }; simmemsel@8003c400 { - reg = <0x8003c400 100>; + reg = <0x8003c400 0x100>; status = "disabled"; }; gpiomon@8003c500 { - reg = <0x8003c500 100>; + reg = <0x8003c500 0x100>; status = "disabled"; }; simenet@8003c700 { - reg = <0x8003c700 100>; + reg = <0x8003c700 0x100>; status = "disabled"; }; armjtag@8003c800 { - reg = <0x8003c800 100>; + reg = <0x8003c800 0x100>; status = "disabled"; }; }; @@ -612,45 +612,45 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { compatible = "fsl,imx28-saif"; - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; interrupts = <59 80>; fsl,saif-dma-channel = <4>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { compatible = "fsl,imx28-saif"; - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; interrupts = <58 81>; fsl,saif-dma-channel = <5>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; spdif@80054000 { - reg = <0x80054000 2000>; + reg = <0x80054000 0x2000>; interrupts = <45 66>; status = "disabled"; }; rtc@80056000 { compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x80056000 2000>; + reg = <0x80056000 0x2000>; interrupts = <29>; }; @@ -658,7 +658,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; interrupts = <111 68>; clock-frequency = <100000>; status = "disabled"; @@ -668,7 +668,7 @@ #address-cells = <1>; #size-cells = <0>; 
compatible = "fsl,imx28-i2c"; - reg = <0x8005a000 2000>; + reg = <0x8005a000 0x2000>; interrupts = <110 69>; clock-frequency = <100000>; status = "disabled"; @@ -676,14 +676,14 @@ pwm: pwm@80064000 { compatible = "fsl,imx28-pwm", "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <8>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts index de065b5976e6..cd86177a3ea2 100644 --- a/arch/arm/boot/dts/imx51-babbage.dts +++ b/arch/arm/boot/dts/imx51-babbage.dts @@ -53,7 +53,7 @@ spi-max-frequency = <6000000>; reg = <0>; interrupt-parent = <&gpio1>; - interrupts = <8>; + interrupts = <8 0x4>; regulators { sw1_reg: sw1 { diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 53cbaa3d4f90..aba28dc87fc8 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -17,6 +17,10 @@ serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; }; tzic: tz-interrupt-controller@e0000000 { diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts index 5b8eafcdbeec..da895e93a999 100644 --- a/arch/arm/boot/dts/imx53-ard.dts +++ b/arch/arm/boot/dts/imx53-ard.dts @@ -64,12 +64,32 @@ reg = <0xf4000000 0x2000000>; phy-mode = "mii"; interrupt-parent = <&gpio2>; - interrupts = <31>; + interrupts = <31 0x8>; reg-io-width = <4>; + /* + * VDD33A and VDDVARIO of LAN9220 are supplied by + * SW4_3V3 of LTC3589. Before the regulator driver + * for this PMIC is available, we use a fixed dummy + * 3V3 regulator to get LAN9220 driver probing work. 
+ */ + vdd33a-supply = <®_3p3v>; + vddvario-supply = <®_3p3v>; smsc,irq-push-pull; }; }; + regulators { + compatible = "simple-bus"; + + reg_3p3v: 3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; + gpio-keys { compatible = "gpio-keys"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index fc79cdc4b4e6..cd37165edce5 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; tzic: tz-interrupt-controller@0fffc000 { diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts index d42e851ceb97..72f30f3e6171 100644 --- a/arch/arm/boot/dts/imx6q-sabrelite.dts +++ b/arch/arm/boot/dts/imx6q-sabrelite.dts @@ -53,6 +53,7 @@ fsl,pins = < 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */ 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */ + 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */ >; }; }; diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi index 3d3c64b014e6..fd57079f71a9 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; cpus { diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts index 9a33077130e8..5bb0bf39d3b8 100644 --- a/arch/arm/boot/dts/kirkwood-dns320.dts +++ b/arch/arm/boot/dts/kirkwood-dns320.dts @@ -1,6 +1,6 @@ /dts-v1/; -/include/ "kirkwood.dtsi" +/include/ "kirkwood-dnskw.dtsi" / { model = "D-Link DNS-320 NAS (Rev A1)"; @@ -15,6 +15,31 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; + gpio-leds { + compatible = "gpio-leds"; + blue-power { + label = "dns320:blue:power"; + gpios = <&gpio0 26 1>; /* GPIO 26 Active Low */ + linux,default-trigger = "default-on"; + }; + blue-usb { + label = "dns320:blue:usb"; + gpios = <&gpio1 11 1>; /* GPIO 43 Active Low */ + }; + orange-l_hdd { + label = "dns320:orange:l_hdd"; + gpios = <&gpio0 28 1>; /* GPIO 28 Active Low */ + }; + orange-r_hdd { + label = "dns320:orange:r_hdd"; + gpios = <&gpio0 27 1>; /* GPIO 27 Active Low */ + }; + orange-usb { + label = "dns320:orange:usb"; + gpios = <&gpio1 3 1>; /* GPIO 35 Active Low */ + }; + }; + ocp@f1000000 { serial@12000 { clock-frequency = <166666667>; @@ -25,40 +50,5 @@ clock-frequency = <166666667>; status = "okay"; }; - - nand@3000000 { - status = "okay"; - - partition@0 { - label = "u-boot"; - reg = <0x0000000 0x100000>; - read-only; - }; - - partition@100000 { - label = "uImage"; - reg = <0x0100000 0x500000>; - }; - - partition@600000 { - label = "ramdisk"; - reg = <0x0600000 0x500000>; - }; - - partition@b00000 { - label = "image"; - reg = <0x0b00000 0x6600000>; - }; - - partition@7100000 { - label = "mini firmware"; - reg = <0x7100000 0xa00000>; - }; - - partition@7b00000 { - label = "config"; - reg = <0x7b00000 0x500000>; - }; - }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts index 16734c1b5dfe..d430713ea9b9 100644 --- a/arch/arm/boot/dts/kirkwood-dns325.dts +++ b/arch/arm/boot/dts/kirkwood-dns325.dts @@ -1,6 +1,6 @@ /dts-v1/; -/include/ "kirkwood.dtsi" +/include/ 
"kirkwood-dnskw.dtsi" / { model = "D-Link DNS-325 NAS (Rev A1)"; @@ -15,45 +15,43 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; - ocp@f1000000 { - serial@12000 { - clock-frequency = <200000000>; - status = "okay"; + gpio-leds { + compatible = "gpio-leds"; + white-power { + label = "dns325:white:power"; + gpios = <&gpio0 26 1>; /* GPIO 26 Active Low */ + linux,default-trigger = "default-on"; + }; + white-usb { + label = "dns325:white:usb"; + gpios = <&gpio1 11 1>; /* GPIO 43 Active Low */ + }; + red-l_hdd { + label = "dns325:red:l_hdd"; + gpios = <&gpio0 28 1>; /* GPIO 28 Active Low */ }; + red-r_hdd { + label = "dns325:red:r_hdd"; + gpios = <&gpio0 27 1>; /* GPIO 27 Active Low */ + }; + red-usb { + label = "dns325:red:usb"; + gpios = <&gpio0 29 1>; /* GPIO 29 Active Low */ + }; + }; - nand@3000000 { + ocp@f1000000 { + i2c@11000 { status = "okay"; - partition@0 { - label = "u-boot"; - reg = <0x0000000 0x100000>; - read-only; - }; - - partition@100000 { - label = "uImage"; - reg = <0x0100000 0x500000>; - }; - - partition@600000 { - label = "ramdisk"; - reg = <0x0600000 0x500000>; - }; - - partition@b00000 { - label = "image"; - reg = <0x0b00000 0x6600000>; - }; - - partition@7100000 { - label = "mini firmware"; - reg = <0x7100000 0xa00000>; - }; - - partition@7b00000 { - label = "config"; - reg = <0x7b00000 0x500000>; + lm75: lm75@48 { + compatible = "national,lm75"; + reg = <0x48>; }; }; + serial@12000 { + clock-frequency = <200000000>; + status = "okay"; + }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi new file mode 100644 index 000000000000..7408655f91b5 --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi @@ -0,0 +1,69 @@ +/include/ "kirkwood.dtsi" + +/ { + model = "D-Link DNS NASes (kirkwood-based)"; + compatible = "dlink,dns-kirkwood", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "Power button"; + linux,code = <116>; + gpios = <&gpio1 2 1>; + }; + button@2 { + label = "USB unmount button"; + linux,code = <161>; + gpios = <&gpio1 15 1>; + }; + button@3 { + label = "Reset button"; + linux,code = <0x198>; + gpios = <&gpio1 16 1>; + }; + }; + + ocp@f1000000 { + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + + nand@3000000 { + status = "okay"; + + partition@0 { + label = "u-boot"; + reg = <0x0000000 0x100000>; + read-only; + }; + + partition@100000 { + label = "uImage"; + reg = <0x0100000 0x500000>; + }; + + partition@600000 { + label = "ramdisk"; + reg = <0x0600000 0x500000>; + }; + + partition@b00000 { + label = "image"; + reg = <0x0b00000 0x6600000>; + }; + + partition@7100000 { + label = "mini firmware"; + reg = <0x7100000 0xa00000>; + }; + + partition@7b00000 { + label = "config"; + reg = <0x7b00000 0x500000>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts index 78b0f06a09a2..26e281fbf6bc 100644 --- a/arch/arm/boot/dts/kirkwood-dreamplug.dts +++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts @@ -20,5 +20,55 @@ clock-frequency = <200000000>; status = "ok"; }; + + spi@10600 { + status = "okay"; + + m25p40@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "mx25l1606e"; + reg = <0>; + spi-max-frequency = <50000000>; + mode = <0>; + + partition@0 { + reg = <0x0 0x80000>; + label = "u-boot"; + }; + + partition@100000 { + reg = <0x100000 0x10000>; + label = "u-boot env"; + }; + + partition@180000 { + reg = <0x180000 0x10000>; 
+ label = "dtb"; + }; + }; + }; + + sata@80000 { + status = "okay"; + nr-ports = <1>; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + + bluetooth { + label = "dreamplug:blue:bluetooth"; + gpios = <&gpio1 15 1>; + }; + wifi { + label = "dreamplug:green:wifi"; + gpios = <&gpio1 16 1>; + }; + wifi-ap { + label = "dreamplug:green:wifi_ap"; + gpios = <&gpio1 17 1>; + }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts new file mode 100644 index 000000000000..7c8238fbb6f9 --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -0,0 +1,99 @@ +/dts-v1/; + +/include/ "kirkwood.dtsi" + +/ { + model = "Seagate GoFlex Net"; + compatible = "seagate,goflexnet", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x8000000>; + }; + + chosen { + bootargs = "console=ttyS0,115200n8 earlyprintk root=/dev/sda1 rootdelay=10"; + }; + + ocp@f1000000 { + serial@12000 { + clock-frequency = <200000000>; + status = "ok"; + }; + + nand@3000000 { + status = "okay"; + + partition@0 { + label = "u-boot"; + reg = <0x0000000 0x100000>; + read-only; + }; + + partition@100000 { + label = "uImage"; + reg = <0x0100000 0x400000>; + }; + + partition@500000 { + label = "pogoplug"; + reg = <0x0500000 0x2000000>; + }; + + partition@2500000 { + label = "root"; + reg = <0x02500000 0xd800000>; + }; + }; + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + + }; + gpio-leds { + compatible = "gpio-leds"; + + health { + label = "status:green:health"; + gpios = <&gpio1 14 1>; + linux,default-trigger = "default-on"; + }; + fault { + label = "status:orange:fault"; + gpios = <&gpio1 15 1>; + }; + left0 { + label = "status:white:left0"; + gpios = <&gpio1 10 0>; + }; + left1 { + label = "status:white:left1"; + gpios = <&gpio1 11 0>; + }; + left2 { + label = "status:white:left2"; + gpios = <&gpio1 12 0>; + }; + left3 { + label = "status:white:left3"; + gpios = <&gpio1 13 0>; + }; + right0 { + label = "status:white:right0"; + gpios = <&gpio1 6 0>; + }; + right1 { + label = "status:white:right1"; + gpios = <&gpio1 7 0>; + }; + right2 { + label = "status:white:right2"; + gpios = <&gpio1 8 0>; + }; + right3 { + label = "status:white:right3"; + gpios = <&gpio1 9 0>; + }; + }; +}; diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index f59dcf6dc45f..66794ed75ff1 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -21,6 +21,11 @@ status = "okay"; }; + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + nand@3000000 { status = "okay"; @@ -41,4 +46,37 @@ }; }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "USB Copy"; + linux,code = <133>; + gpios = <&gpio0 29 1>; + }; + button@2 { + label = "Reset"; + linux,code = <0x198>; + gpios = <&gpio0 28 1>; + }; + }; + gpio-leds { + compatible = "gpio-leds"; + + green-os { + label = "ib62x0:green:os"; + gpios = <&gpio0 25 0>; + linux,default-trigger = "default-on"; + }; + red-os { + label = "ib62x0:red:os"; + gpios = <&gpio0 22 0>; + }; + usb-copy { + label = "ib62x0:red:usb_copy"; + gpios = <&gpio0 27 0>; + }; + }; }; diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts index 026a1f82d813..52d947045106 100644 --- a/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -18,9 +18,51 @@ }; ocp@f1000000 { + i2c@11000 { + status = "okay"; + + lm63: lm63@4c { 
+ compatible = "national,lm63"; + reg = <0x4c>; + }; + }; serial@12000 { clock-frequency = <200000000>; status = "ok"; }; }; + gpio-leds { + compatible = "gpio-leds"; + + led-level { + label = "led_level"; + gpios = <&gpio1 9 0>; + linux,default-trigger = "default-on"; + }; + power-blue { + label = "power:blue"; + gpios = <&gpio1 11 0>; + linux,default-trigger = "timer"; + }; + usb1 { + label = "usb1:blue"; + gpios = <&gpio1 12 0>; + }; + usb2 { + label = "usb2:blue"; + gpios = <&gpio1 13 0>; + }; + usb3 { + label = "usb3:blue"; + gpios = <&gpio1 14 0>; + }; + usb4 { + label = "usb4:blue"; + gpios = <&gpio1 15 0>; + }; + otb { + label = "otb:blue"; + gpios = <&gpio1 16 0>; + }; + }; }; diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts new file mode 100644 index 000000000000..9510c9ea666c --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts @@ -0,0 +1,20 @@ +/dts-v1/; + +/include/ "kirkwood-lsxl.dtsi" + +/ { + model = "Buffalo Linkstation LS-CHLv2"; + compatible = "buffalo,lschlv2", "buffalo,lsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x4000000>; + }; + + ocp@f1000000 { + serial@12000 { + clock-frequency = <166666667>; + status = "okay"; + }; + }; +}; diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts new file mode 100644 index 000000000000..739019c4cba9 --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts @@ -0,0 +1,20 @@ +/dts-v1/; + +/include/ "kirkwood-lsxl.dtsi" + +/ { + model = "Buffalo Linkstation LS-XHL"; + compatible = "buffalo,lsxhl", "buffalo,lsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x10000000>; + }; + + ocp@f1000000 { + serial@12000 { + clock-frequency = <200000000>; + status = "okay"; + }; + }; +}; diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi new file mode 100644 index 000000000000..8ac51c08269d --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi @@ -0,0 +1,95 @@ +/include/ "kirkwood.dtsi" + +/ { + chosen { + bootargs = "console=ttyS0,115200n8 earlyprintk"; + }; + + ocp@f1000000 { + sata@80000 { + status = "okay"; + nr-ports = <1>; + }; + + spi@10600 { + status = "okay"; + + m25p40@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "m25p40"; + reg = <0>; + spi-max-frequency = <25000000>; + mode = <0>; + + partition@0 { + reg = <0x0 0x60000>; + label = "uboot"; + read-only; + }; + + partition@60000 { + reg = <0x60000 0x10000>; + label = "dtb"; + read-only; + }; + + partition@70000 { + reg = <0x70000 0x10000>; + label = "uboot_env"; + }; + }; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "Function Button"; + linux,code = <132>; + gpios = <&gpio1 9 1>; + }; + button@2 { + label = "Power-on Switch"; + linux,code = <116>; + gpios = <&gpio1 10 1>; + }; + button@3 { + label = "Power-auto Switch"; + linux,code = <142>; + gpios = <&gpio1 11 1>; + }; + }; + + gpio_leds { + compatible = "gpio-leds"; + + led@1 { + label = "lschlv2:blue:func"; + gpios = <&gpio1 4 1>; + }; + + led@2 { + label = "lschlv2:red:alarm"; + gpios = <&gpio1 5 1>; + }; + + led@3 { + label = "lschlv2:amber:info"; + gpios = <&gpio1 6 1>; + }; + + led@4 { + label = "lschlv2:blue:power"; + gpios = <&gpio1 7 1>; + linux,default-trigger = "default-on"; + }; + + led@5 { + label = "lschlv2:red:func"; + gpios = <&gpio1 16 1>; + }; + }; +}; diff --git 
a/arch/arm/boot/dts/kirkwood-ts219-6281.dts b/arch/arm/boot/dts/kirkwood-ts219-6281.dts new file mode 100644 index 000000000000..ccbf32757800 --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-ts219-6281.dts @@ -0,0 +1,21 @@ +/dts-v1/; + +/include/ "kirkwood-ts219.dtsi" + +/ { + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "USB Copy"; + linux,code = <133>; + gpios = <&gpio0 15 1>; + }; + button@2 { + label = "Reset"; + linux,code = <0x198>; + gpios = <&gpio0 16 1>; + }; + }; +};
\ No newline at end of file diff --git a/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/kirkwood-ts219-6282.dts new file mode 100644 index 000000000000..fbe9932161a1 --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-ts219-6282.dts @@ -0,0 +1,21 @@ +/dts-v1/; + +/include/ "kirkwood-ts219.dtsi" + +/ { + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "USB Copy"; + linux,code = <133>; + gpios = <&gpio1 11 1>; + }; + button@2 { + label = "Reset"; + linux,code = <0x198>; + gpios = <&gpio1 5 1>; + }; + }; +};
\ No newline at end of file diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi new file mode 100644 index 000000000000..64ea27cb3298 --- /dev/null +++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi @@ -0,0 +1,78 @@ +/include/ "kirkwood.dtsi" + +/ { + model = "QNAP TS219 family"; + compatible = "qnap,ts219", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x20000000>; + }; + + chosen { + bootargs = "console=ttyS0,115200n8"; + }; + + ocp@f1000000 { + i2c@11000 { + status = "okay"; + clock-frequency = <400000>; + + s35390a: s35390a@30 { + compatible = "s35390a"; + reg = <0x30>; + }; + }; + serial@12000 { + clock-frequency = <200000000>; + status = "okay"; + }; + serial@12100 { + clock-frequency = <200000000>; + status = "okay"; + }; + spi@10600 { + status = "okay"; + + m25p128@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "m25p128"; + reg = <0>; + spi-max-frequency = <20000000>; + mode = <0>; + + partition@0000000 { + reg = <0x00000000 0x00080000>; + label = "U-Boot"; + }; + + partition@00200000 { + reg = <0x00200000 0x00200000>; + label = "Kernel"; + }; + + partition@00400000 { + reg = <0x00400000 0x00900000>; + label = "RootFS1"; + }; + partition@00d00000 { + reg = <0x00d00000 0x00300000>; + label = "RootFS2"; + }; + partition@00040000 { + reg = <0x00080000 0x00040000>; + label = "U-Boot Config"; + }; + partition@000c0000 { + reg = <0x000c0000 0x00140000>; + label = "NAS Config"; + }; + }; + }; + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + }; +}; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index f95dbc190ab6..cef9616f330a 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -2,6 +2,15 @@ / { compatible = "marvell,kirkwood"; + interrupt-parent = <&intc>; + + intc: interrupt-controller { + compatible = "marvell,orion-intc", "marvell,intc"; + interrupt-controller; + #interrupt-cells = <1>; + reg = <0xf1020204 0x04>, + <0xf1020214 0x04>; + }; ocp@f1000000 { compatible = "simple-bus"; @@ -9,6 +18,24 @@ #address-cells = <1>; #size-cells = <1>; + gpio0: gpio@10100 { + compatible = "marvell,orion-gpio"; + #gpio-cells = <2>; + gpio-controller; + reg = <0x10100 0x40>; + ngpio = <32>; + interrupts = <35>, <36>, <37>, <38>; + }; + + gpio1: gpio@10140 { + compatible = "marvell,orion-gpio"; + #gpio-cells = <2>; + gpio-controller; + reg = <0x10140 0x40>; + ngpio = <18>; + interrupts = <39>, <40>, <41>; + }; + serial@12000 { compatible = "ns16550a"; reg = <0x12000 0x100>; @@ -33,6 +60,29 @@ interrupts = <53>; }; + spi@10600 { + compatible = "marvell,orion-spi"; + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + interrupts = <23>; + reg = <0x10600 0x28>; + status = "disabled"; + }; + + wdt@20300 { + compatible = "marvell,orion-wdt"; + reg = <0x20300 0x28>; + status = "okay"; + }; + + sata@80000 { + compatible = "marvell,orion-sata"; + reg = <0x80000 0x5000>; + interrupts = <21>; + status = "disabled"; + }; + nand@3000000 { #address-cells = <1>; #size-cells = <1>; @@ -45,5 +95,15 @@ /* set partition map and/or chip-delay in board dts */ status = "disabled"; }; + + i2c@11000 { + compatible = "marvell,mv64xxx-i2c"; + reg = <0x11000 0x20>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <29>; + clock-frequency = <100000>; + status = "disabled"; + }; }; }; diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index f725b9637b33..3c9f32f9b6b4 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig 
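The Kirkwood additions above follow the usual SoC/board split for device tree: kirkwood.dtsi gains the shared controllers (gpio, spi, wdt, sata, i2c) with status = "disabled", and each board file enables the ones it actually wires up and describes its slaves, such as the DNS-325's lm75@48, the iConnect's lm63@4c and the TS219's s35390a RTC. For comparison, describing such an I2C slave from pre-devicetree board code would look roughly like the following sketch, which assumes the controller registers as bus 0:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>

static struct i2c_board_info dns325_i2c_devices[] __initdata = {
	{ I2C_BOARD_INFO("lm75", 0x48) },	/* same device as the lm75@48 node */
};

static int __init dns325_i2c_init(void)
{
	/* assumes the mv64xxx-i2c controller probes as bus 0 */
	return i2c_register_board_info(0, dns325_i2c_devices,
				       ARRAY_SIZE(dns325_i2c_devices));
}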
+++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -192,6 +192,7 @@ CONFIG_RTC_DRV_MC13XXX=y CONFIG_RTC_DRV_MXC=y CONFIG_DMADEVICES=y CONFIG_IMX_SDMA=y +CONFIG_MXS_DMA=y CONFIG_COMMON_CLK_DEBUG=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index ccdb6357fb74..4edcfb4e4dee 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -34,7 +34,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_AEABI=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_AUTO_ZRELADDR=y CONFIG_FPE_NWFPE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b152de79fd95..e58edc36b406 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_TWL92330=y CONFIG_RTC_DRV_TWL4030=y +CONFIG_DMADEVICES=y +CONFIG_DMA_OMAP=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig index 1d24f8458bef..71277a1591ba 100644 --- a/arch/arm/configs/tct_hammer_defconfig +++ b/arch/arm/configs/tct_hammer_defconfig @@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_BUG is not set +# CONFIG_BUGVERBOSE is not set # CONFIG_ELF_CORE is not set # CONFIG_SHMEM is not set CONFIG_SLOB=y diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 004c1bc95d2b..e4448e16046d 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm) static inline void vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned static inline void vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { unsigned long addr = user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index 93226cf23ae0..b1479fd04a95 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -7,121 +7,10 @@ */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H - -#if __LINUX_ARM_ARCH__ < 6 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */ -# include <asm-generic/mutex-xchg.h> -#else - /* - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized - * atomic decrement (it is not a reliable atomic decrement but it satisfies - * the defined semantics for our purpose, while being smaller and faster - * than a real atomic decrement or atomic swap. The idea is to attempt - * decrementing the lock value only once. 
If once decremented it isn't zero, - * or if its store-back fails due to a dispute on the exclusive store, we - * simply bail out immediately through the slow path where the lock will be - * reattempted until it succeeds. + * On pre-ARMv6 hardware this results in a swp-based implementation, + * which is the most efficient. For ARMv6+, we emit a pair of exclusive + * accesses instead. */ -static inline void -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - fail_fn(count); -} - -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - __res = fail_fn(count); - return __res; -} - -/* - * Same trick is used for the unlock fast path. However the original value, - * rather than the result, is used to test for success in order to have - * better generated assembly. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "ldrex %0, [%3] \n\t" - "add %1, %0, #1 \n\t" - "strex %2, %1, [%3] " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __orig |= __ex_flag; - if (unlikely(__orig != 0)) - fail_fn(count); -} - -/* - * If the unlock was done on a contended lock, or if the unlock simply fails - * then the mutex remains locked. - */ -#define __mutex_slowpath_needs_to_unlock() 1 - -/* - * For __mutex_fastpath_trylock we use another construct which could be - * described as a "single value cmpxchg". - * - * This provides the needed trylock semantics like cmpxchg would, but it is - * lighter and less generic than a true cmpxchg implementation. 
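With the ldrex/strex fastpaths here deleted, ARM's mutex.h simply pulls in asm-generic/mutex-xchg.h for every CPU generation (the surviving #include follows below). That header implements the lock fastpath as a plain atomic exchange; a simplified, illustrative model of the idea, not a copy of the generic header:

#include <linux/atomic.h>
#include <linux/compiler.h>

/* counter convention: 1 = unlocked, 0 = locked, negative = contended */
static inline void
xchg_mutex_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* take the lock by swapping in 0; anything other than 1 means it
	 * was not free, so fall back to the slowpath */
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);
}

The real generic header is more careful than this sketch about keeping the counter in a contended state when the fastpath fails.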
- */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "1: ldrex %0, [%3] \n\t" - "subs %1, %0, #1 \n\t" - "strexeq %2, %1, [%3] \n\t" - "movlt %0, #0 \n\t" - "cmpeq %2, #0 \n\t" - "bgt 1b " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&count->counter) - : "cc", "memory" ); - - return __orig; -} - -#endif +#include <asm-generic/mutex-xchg.h> #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index f66626d71e7d..41dc31f834c3 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) + #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) { @@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - if (addr >= TASK_SIZE) - set_pte_ext(ptep, pteval, 0); - else { + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_present_user(pteval)) { __sync_icache_dcache(pteval); - set_pte_ext(ptep, pteval, PTE_EXT_NG); + ext |= PTE_EXT_NG; } -} -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte) (0) - -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) + set_pte_ext(ptep, pteval, ext); +} #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <- type --> 0 0 0 + * <--------------- offset ----------------------> < type -> 0 0 0 * - * This gives us up to 63 swap files and 32GB per swap file. Note that + * This gives us up to 31 swap files and 64GB per swap file. Note that * the offset field is always non-zero. 
*/ #define __SWP_TYPE_SHIFT 3 -#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index e3f757263438..05b8e82ec9f5 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -10,5 +10,7 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); +extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate); #endif diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 23ebc0c82a39..24d284a1bfc7 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -196,7 +196,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn } struct membank { phys_addr_t start; - unsigned long size; + phys_addr_t size; unsigned int highmem; }; @@ -217,7 +217,7 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -extern int arm_add_memory(phys_addr_t start, unsigned long size); +extern int arm_add_memory(phys_addr_t start, phys_addr_t size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 0d1851ca6eb9..0f82098c9bfe 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -244,6 +244,19 @@ svc_preempt: b 1b #endif +__und_fault: + @ Correct the PC such that it is pointing at the instruction + @ which caused the fault. If the faulting instruction was ARM + @ the PC will be pointing at the next instruction, and have to + @ subtract 4. Otherwise, it is Thumb, and the PC will be + @ pointing at the second half of the Thumb instruction. We + @ have to subtract 2. + ldr r2, [r0, #S_PC] + sub r2, r2, r1 + str r2, [r0, #S_PC] + b do_undefinstr +ENDPROC(__und_fault) + .align 5 __und_svc: #ifdef CONFIG_KPROBES @@ -261,25 +274,32 @@ __und_svc: @ @ r0 - instruction @ -#ifndef CONFIG_THUMB2_KERNEL +#ifndef CONFIG_THUMB2_KERNEL ldr r0, [r4, #-4] #else + mov r1, #2 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0 - ldrhhs r9, [r4] @ bottom 16 bits - orrhs r0, r9, r0, lsl #16 + blo __und_svc_fault + ldrh r9, [r4] @ bottom 16 bits + add r4, r4, #2 + str r4, [sp, #S_PC] + orr r0, r9, r0, lsl #16 #endif - adr r9, BSYM(1f) + adr r9, BSYM(__und_svc_finish) mov r2, r4 bl call_fpe + mov r1, #4 @ PC correction to apply +__und_svc_fault: mov r0, sp @ struct pt_regs *regs - bl do_undefinstr + bl __und_fault @ @ IRQs off again before pulling preserved data off the stack @ -1: disable_irq_notrace +__und_svc_finish: + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -423,25 +443,33 @@ __und_usr: mov r2, r4 mov r3, r5 + @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the + @ faulting instruction depending on Thumb mode. 
+ @ r3 = regs->ARM_cpsr @ - @ fall through to the emulation code, which returns using r9 if - @ it has emulated the instruction, or the more conventional lr - @ if we are to treat this as a real undefined instruction - @ - @ r0 - instruction + @ The emulation code returns using r9 if it has emulated the + @ instruction, or the more conventional lr if we are to treat + @ this as a real undefined instruction @ adr r9, BSYM(ret_from_exception) - adr lr, BSYM(__und_usr_unknown) + tst r3, #PSR_T_BIT @ Thumb mode? - itet eq @ explicit IT needed for the 1f label - subeq r4, r2, #4 @ ARM instr at LR - 4 - subne r4, r2, #2 @ Thumb instr at LR - 2 -1: ldreqt r0, [r4] + bne __und_usr_thumb + sub r4, r2, #4 @ ARM instr at LR - 4 +1: ldrt r0, [r4] #ifdef CONFIG_CPU_ENDIAN_BE8 - reveq r0, r0 @ little endian instruction + rev r0, r0 @ little endian instruction #endif - beq call_fpe + @ r0 = 32-bit ARM instruction which caused the exception + @ r2 = PC value for the following instruction (:= regs->ARM_pc) + @ r4 = PC value for the faulting instruction + @ lr = 32-bit undefined instruction function + adr lr, BSYM(__und_usr_fault_32) + b call_fpe + +__und_usr_thumb: @ Thumb instruction + sub r4, r2, #2 @ First half of thumb instr at LR - 2 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 /* * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms @@ -455,7 +483,7 @@ __und_usr: ldr r5, .LCcpu_architecture ldr r5, [r5] cmp r5, #CPU_ARCH_ARMv7 - blo __und_usr_unknown + blo __und_usr_fault_16 @ 16bit undefined instruction /* * The following code won't get run unless the running CPU really is v7, so * coding round the lack of ldrht on older arches is pointless. Temporarily @@ -463,15 +491,18 @@ __und_usr: */ .arch armv6t2 #endif -2: - ARM( ldrht r5, [r4], #2 ) - THUMB( ldrht r5, [r4] ) - THUMB( add r4, r4, #2 ) +2: ldrht r5, [r4] cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_unknown -3: ldrht r0, [r4] + blo __und_usr_fault_16 @ 16bit undefined instruction +3: ldrht r0, [r2] add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 + adr lr, BSYM(__und_usr_fault_32) + @ r0 = the two 16-bit Thumb instructions which caused the exception + @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) + @ r4 = PC value for the first 16-bit Thumb instruction + @ lr = 32bit undefined instruction function #if __LINUX_ARM_ARCH__ < 7 /* If the target arch was overridden, change it back: */ @@ -482,17 +513,13 @@ __und_usr: #endif #endif /* __LINUX_ARM_ARCH__ < 7 */ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */ - b __und_usr_unknown + b __und_usr_fault_16 #endif - UNWIND(.fnend ) + UNWIND(.fnend) ENDPROC(__und_usr) - @ - @ fallthrough to call_fpe - @ - /* - * The out of line fixup for the ldrt above. + * The out of line fixup for the ldrt instructions above. */ .pushsection .fixup, "ax" .align 2 @@ -524,11 +551,12 @@ ENDPROC(__und_usr) * NEON handler code. * * Emulators may wish to make use of the following registers: - * r0 = instruction opcode. - * r2 = PC+4 + * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) + * r2 = PC value to resume execution after successful emulation * r9 = normal "successful" return address - * r10 = this threads thread_info structure. + * r10 = this threads thread_info structure * lr = unrecognised instruction return address + * IRQs disabled, FIQs enabled. 
*/ @ @ Fall-through from Thumb-2 __und_usr @@ -659,12 +687,17 @@ ENTRY(no_fp) mov pc, lr ENDPROC(no_fp) -__und_usr_unknown: - enable_irq +__und_usr_fault_32: + mov r1, #4 + b 1f +__und_usr_fault_16: + mov r1, #2 +1: enable_irq mov r0, sp adr lr, BSYM(ret_from_exception) - b do_undefinstr -ENDPROC(__und_usr_unknown) + b __und_fault +ENDPROC(__und_usr_fault_32) +ENDPROC(__und_usr_fault_16) .align 5 __pabt_usr: diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 49d9f9305247..978eac57e04a 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -51,23 +51,15 @@ ret_fast_syscall: fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 work_pending: - tst r1, #_TIF_NEED_RESCHED - bne work_resched - /* - * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here - */ - ldr r2, [sp, #S_PSR] mov r0, sp @ 'regs' - tst r2, #15 @ are we returning to user mode? - bne no_work_pending @ no? just leave, then... mov r2, why @ 'syscall' - tst r1, #_TIF_SIGPENDING @ delivering a signal? - movne why, #0 @ prevent further restarts - bl do_notify_resume - b ret_slow_syscall @ Check work again + bl do_work_pending + cmp r0, #0 + beq no_work_pending + movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) + ldmia sp, {r0 - r6} @ have to reload r0 - r6 + b local_restart @ ... and off we go -work_resched: - bl schedule /* * "slow" syscall return path. "why" tells us if this was a real syscall. */ @@ -409,6 +401,7 @@ ENTRY(vector_swi) eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif +local_restart: ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args @@ -450,7 +443,8 @@ __sys_trace: mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit - ldmccia r1, {r0 - r3} @ have to reload r0 - r3 + ldmccia r1, {r0 - r6} @ have to reload r0 - r6 + stmccia sp, {r4, r5} @ and update the stack args ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine b 2b diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index df0bf0c8cb79..34e56647dcee 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -179,19 +179,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, old = *parent; *parent = return_hooker; - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer); - if (err == -EBUSY) { - *parent = old; - return; - } - trace.func = self_addr; + trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; *parent = old; + return; + } + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, + frame_pointer); + if (err == -EBUSY) { + *parent = old; + return; } } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 19c95ea65b2f..693b744fd572 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -247,6 +247,7 @@ void machine_shutdown(void) void machine_halt(void) { machine_shutdown(); + local_irq_disable(); while (1); } @@ -268,6 +269,7 @@ void machine_restart(char *cmd) /* Whoops - the platform was unable to reboot. Tell the user! 
*/ printk("Reboot failed -- System halted\n"); + local_irq_disable(); while (1); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index dab711e6e1ca..3e0fc5f7ed4b 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -25,6 +25,7 @@ #include <linux/regset.h> #include <linux/audit.h> #include <linux/tracehook.h> +#include <linux/unistd.h> #include <asm/pgtable.h> #include <asm/traps.h> diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index 27d186abbc06..f4515393248d 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -21,6 +21,8 @@ struct clock_data { u32 epoch_cyc_copy; u32 mult; u32 shift; + bool suspended; + bool needs_suspend; }; static void sched_clock_poll(unsigned long wrap_ticks); @@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) u64 epoch_ns; u32 epoch_cyc; + if (cd.suspended) + return cd.epoch_ns; + /* * Load the epoch_cyc and epoch_ns atomically. We do this by * ensuring that we always write epoch_cyc, epoch_ns and @@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks) update_sched_clock(); } +void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate) +{ + setup_sched_clock(read, bits, rate); + cd.needs_suspend = true; +} + void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; @@ -169,11 +181,23 @@ void __init sched_clock_postinit(void) static int sched_clock_suspend(void) { sched_clock_poll(sched_clock_timer.data); + if (cd.needs_suspend) + cd.suspended = true; return 0; } +static void sched_clock_resume(void) +{ + if (cd.needs_suspend) { + cd.epoch_cyc = read_sched_clock(); + cd.epoch_cyc_copy = cd.epoch_cyc; + cd.suspended = false; + } +} + static struct syscore_ops sched_clock_ops = { .suspend = sched_clock_suspend, + .resume = sched_clock_resume, }; static int __init sched_clock_syscore_init(void) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e15d83bb4ea3..a81dcecc7343 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -508,7 +508,7 @@ void __init dump_machine_table(void) /* can't use cpu_relax() here as it may require MMU setup */; } -int __init arm_add_memory(phys_addr_t start, unsigned long size) +int __init arm_add_memory(phys_addr_t start, phys_addr_t size) { struct membank *bank = &meminfo.bank[meminfo.nr_banks]; @@ -538,7 +538,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) } #endif - bank->size = size & PAGE_MASK; + bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); /* * Check whether this memory region has non-zero size or @@ -558,7 +558,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) static int __init early_mem(char *p) { static int usermem __initdata = 0; - unsigned long size; + phys_addr_t size; phys_addr_t start; char *endp; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 536c5d6b340b..f27789e4e38a 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -27,7 +27,6 @@ */ #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) -#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE) /* * With EABI, the syscall number has to be loaded into r7. 
@@ -48,18 +47,6 @@ const unsigned long sigreturn_codes[7] = { }; /* - * Either we support OABI only, or we have EABI with the OABI - * compat layer enabled. In the later case we don't know if - * user space is EABI or not, and if not we must not clobber r7. - * Always using the OABI syscall solves that issue and works for - * all those cases. - */ -const unsigned long syscall_restart_code[2] = { - SWI_SYS_RESTART, /* swi __NR_restart_syscall */ - 0xe49df004, /* ldr pc, [sp], #4 */ -}; - -/* * atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) @@ -582,12 +569,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ -static void do_signal(struct pt_regs *regs, int syscall) +static int do_signal(struct pt_regs *regs, int syscall) { unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; + int restart = 0; /* * If we were from a system call, check for system call restarting... @@ -602,15 +590,15 @@ static void do_signal(struct pt_regs *regs, int syscall) * debugger will see the already changed PSW. */ switch (retval) { + case -ERESTART_RESTARTBLOCK: + restart -= 2; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: + restart++; regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc = restart_addr; break; - case -ERESTART_RESTARTBLOCK: - regs->ARM_r0 = -EINTR; - break; } } @@ -619,14 +607,17 @@ static void do_signal(struct pt_regs *regs, int syscall) * point the debugger may change all our registers ... */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->ARM_pc != restart_addr) + restart = 0; if (signr > 0) { - /* - * Depending on the signal settings we may need to revert the - * decision to restart the system call. But skip this if a - * debugger has chosen to restart at a different PC. - */ - if (regs->ARM_pc == restart_addr) { - if (retval == -ERESTARTNOHAND + if (unlikely(restart)) { + if (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK || (retval == -ERESTARTSYS && !(ka.sa.sa_flags & SA_RESTART))) { regs->ARM_r0 = -EINTR; @@ -635,52 +626,43 @@ static void do_signal(struct pt_regs *regs, int syscall) } handle_signal(signr, &ka, &info, regs); - return; - } - - if (syscall) { - /* - * Handle restarting a different system call. As above, - * if a debugger has chosen to restart at a different PC, - * ignore the restart. 
- */ - if (retval == -ERESTART_RESTARTBLOCK - && regs->ARM_pc == continue_addr) { - if (thumb_mode(regs)) { - regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; - regs->ARM_pc -= 2; - } else { -#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT) - regs->ARM_r7 = __NR_restart_syscall; - regs->ARM_pc -= 4; -#else - u32 __user *usp; - - regs->ARM_sp -= 4; - usp = (u32 __user *)regs->ARM_sp; - - if (put_user(regs->ARM_pc, usp) == 0) { - regs->ARM_pc = KERN_RESTART_CODE; - } else { - regs->ARM_sp += 4; - force_sigsegv(0, current); - } -#endif - } - } + return 0; } restore_saved_sigmask(); + if (unlikely(restart)) + regs->ARM_pc = continue_addr; + return restart; } -asmlinkage void -do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) +asmlinkage int +do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { - if (thread_flags & _TIF_SIGPENDING) - do_signal(regs, syscall); - - if (thread_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } + do { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) + return 0; + local_irq_enable(); + if (thread_flags & _TIF_SIGPENDING) { + int restart = do_signal(regs, syscall); + if (unlikely(restart)) { + /* + * Restart without handlers. + * Deal with it without leaving + * the kernel space. + */ + return restart; + } + syscall = 0; + } else { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + } + local_irq_disable(); + thread_flags = current_thread_info()->flags; + } while (thread_flags & _TIF_WORK_MASK); + return 0; } diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h index 6fcfe8398aa4..5ff067b7c752 100644 --- a/arch/arm/kernel/signal.h +++ b/arch/arm/kernel/signal.h @@ -8,7 +8,5 @@ * published by the Free Software Foundation. */ #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) -#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes)) extern const unsigned long sigreturn_codes[7]; -extern const unsigned long syscall_restart_code[2]; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index aea74f5bc34a..ebd8ad274d76 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -563,7 +563,8 @@ void smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - smp_cross_call(&mask, IPI_CPU_STOP); + if (!cpumask_empty(&mask)) + smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 198b08456e90..26c12c6440fc 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid) * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array */ -void init_cpu_topology(void) +void __init init_cpu_topology(void) { unsigned int cpu; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 8b97d739b17b..f7945218b8c6 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -402,18 +402,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr) asmlinkage void __exception do_undefinstr(struct pt_regs *regs) { - unsigned int correction = thumb_mode(regs) ? 
2 : 4; unsigned int instr; siginfo_t info; void __user *pc; - /* - * According to the ARM ARM, PC is 2 or 4 bytes ahead, - * depending whether we're in Thumb mode or not. - * Correct this offset. - */ - regs->ARM_pc -= correction; - pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { @@ -852,8 +844,6 @@ void __init early_trap_init(void *vectors_base) */ memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), sigreturn_codes, sizeof(sigreturn_codes)); - memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE), - syscall_restart_code, sizeof(syscall_restart_code)); flush_icache_range(vectors, vectors + PAGE_SIZE); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 2473fd1fd51c..af72969820b4 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile @@ -16,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ call_with_stack.o mmu-y := clear_user.o copy_page.o getuser.o putuser.o -mmu-y += copy_from_user.o copy_to_user.o + +# the code in uaccess.S is not preemption safe and +# probably faster on ARMv3 only +ifeq ($(CONFIG_PREEMPT),y) + mmu-y += copy_from_user.o copy_to_user.o +else +ifneq ($(CONFIG_CPU_32v3),y) + mmu-y += copy_from_user.o copy_to_user.o +else + mmu-y += uaccess.o +endif +endif # using lib_ here won't override already available weak symbols obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o -lib-$(CONFIG_MMU) += $(mmu-y) -lib-y += io-readsw-armv4.o io-writesw-armv4.o +lib-$(CONFIG_MMU) += $(mmu-y) + +ifeq ($(CONFIG_CPU_32v3),y) + lib-y += io-readsw-armv3.o io-writesw-armv3.o +else + lib-y += io-readsw-armv4.o io-writesw-armv4.o +endif + lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o lib-$(CONFIG_ARCH_SHARK) += io-shark.o diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S new file mode 100644 index 000000000000..88487c8c4f23 --- /dev/null +++ b/arch/arm/lib/io-readsw-armv3.S @@ -0,0 +1,106 @@ +/* + * linux/arch/arm/lib/io-readsw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/linkage.h> +#include <asm/assembler.h> + +.Linsw_bad_alignment: + adr r0, .Linsw_bad_align_msg + mov r2, lr + b panic +.Linsw_bad_align_msg: + .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.Linsw_align: tst r1, #1 + bne .Linsw_bad_alignment + + ldr r3, [r0] + strb r3, [r1], #1 + mov r3, r3, lsr #8 + strb r3, [r1], #1 + + subs r2, r2, #1 + moveq pc, lr + +ENTRY(__raw_readsw) + teq r2, #0 @ do we have to check for the zero len? 
+ moveq pc, lr + tst r1, #3 + bne .Linsw_align + +.Linsw_aligned: mov ip, #0xff + orr ip, ip, ip, lsl #8 + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .Lno_insw_8 + +.Linsw_8_lp: ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + ldr r5, [r0] + and r5, r5, ip + ldr r6, [r0] + orr r5, r5, r6, lsl #16 + + ldr r6, [r0] + and r6, r6, ip + ldr lr, [r0] + orr r6, r6, lr, lsl #16 + + stmia r1!, {r3 - r6} + + subs r2, r2, #8 + bpl .Linsw_8_lp + + tst r2, #7 + ldmeqfd sp!, {r4, r5, r6, pc} + +.Lno_insw_8: tst r2, #4 + beq .Lno_insw_4 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + stmia r1!, {r3, r4} + +.Lno_insw_4: tst r2, #2 + beq .Lno_insw_2 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + str r3, [r1], #4 + +.Lno_insw_2: tst r2, #1 + ldrne r3, [r0] + strneb r3, [r1], #1 + movne r3, r3, lsr #8 + strneb r3, [r1] + + ldmfd sp!, {r4, r5, r6, pc} + + diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S new file mode 100644 index 000000000000..49b800419e32 --- /dev/null +++ b/arch/arm/lib/io-writesw-armv3.S @@ -0,0 +1,126 @@ +/* + * linux/arch/arm/lib/io-writesw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/linkage.h> +#include <asm/assembler.h> + +.Loutsw_bad_alignment: + adr r0, .Loutsw_bad_align_msg + mov r2, lr + b panic +.Loutsw_bad_align_msg: + .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.Loutsw_align: tst r1, #1 + bne .Loutsw_bad_alignment + + add r1, r1, #2 + + ldr r3, [r1, #-4] + mov r3, r3, lsr #16 + orr r3, r3, r3, lsl #16 + str r3, [r0] + subs r2, r2, #1 + moveq pc, lr + +ENTRY(__raw_writesw) + teq r2, #0 @ do we have to check for the zero len? 
+ moveq pc, lr + tst r1, #3 + bne .Loutsw_align + + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .Lno_outsw_8 + +.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r5, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r5, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r6, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r6, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + subs r2, r2, #8 + bpl .Loutsw_8_lp + + tst r2, #7 + ldmeqfd sp!, {r4, r5, r6, pc} + +.Lno_outsw_8: tst r2, #4 + beq .Lno_outsw_4 + + ldmia r1!, {r3, r4} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.Lno_outsw_4: tst r2, #2 + beq .Lno_outsw_2 + + ldr r3, [r1], #4 + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.Lno_outsw_2: tst r2, #1 + + ldrne r3, [r1] + + movne ip, r3, lsl #16 + orrne ip, ip, ip, lsr #16 + strne ip, [r0] + + ldmfd sp!, {r4, r5, r6, pc} diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S new file mode 100644 index 000000000000..5c908b1cb8ed --- /dev/null +++ b/arch/arm/lib/uaccess.S @@ -0,0 +1,564 @@ +/* + * linux/arch/arm/lib/uaccess.S + * + * Copyright (C) 1995, 1996,1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Routines to block copy data to/from user memory + * These are highly optimised both for the 4k page size + * and for various alignments. + */ +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/errno.h> +#include <asm/domain.h> + + .text + +#define PAGE_SHIFT 12 + +/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) + * Purpose : copy a block to user memory from kernel memory + * Params : to - user memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ + +.Lc2u_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + sub r2, r2, ip + b .Lc2u_dest_aligned + +ENTRY(__copy_to_user) + stmfd sp!, {r2, r4 - r7, lr} + cmp r2, #4 + blt .Lc2u_not_enough + ands ip, r0, #3 + bne .Lc2u_dest_not_aligned +.Lc2u_dest_aligned: + + ands ip, r1, #3 + bne .Lc2u_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lc2u_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_0nowords + ldr r3, [r1], #4 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .Lc2u_0rem8lp + +.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + ldmia r1!, {r3 - r6} + subs ip, ip, #32 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_0cpy8lp + +.Lc2u_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} @ Shouldnt fault + tst ip, #8 + ldmneia r1!, {r3 - r4} + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + ldrne r3, [r1], #4 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_0fupi +.Lc2u_0nowords: teq ip, #0 + beq .Lc2u_finished +.Lc2u_nowords: cmp ip, #2 + ldrb r3, [r1], #1 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_not_enough: + movs ip, r2 + bne .Lc2u_nowords +.Lc2u_finished: mov r0, #0 + ldmfd sp!, {r2, r4 - r7, pc} + +.Lc2u_src_not_aligned: + bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt .Lc2u_3fupi + beq .Lc2u_2fupi +.Lc2u_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_1nowords + mov r3, r7, pull #8 + ldr r7, [r1], #4 + orr r3, r3, r7, push #24 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_1rem8lp + +.Lc2u_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_1cpy8lp + +.Lc2u_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #8 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #24 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_1fupi +.Lc2u_1nowords: mov r3, r7, get_byte_1 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + movge r3, r7, get_byte_2 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + movgt r3, r7, get_byte_3 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_2nowords + mov r3, r7, pull #16 + ldr r7, [r1], #4 + orr r3, r3, r7, push #16 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_2rem8lp + +.Lc2u_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_2cpy8lp + +.Lc2u_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #16 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #16 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq 
.Lc2u_2fupi +.Lc2u_2nowords: mov r3, r7, get_byte_2 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + movge r3, r7, get_byte_3 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_3nowords + mov r3, r7, pull #24 + ldr r7, [r1], #4 + orr r3, r3, r7, push #8 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_3rem8lp + +.Lc2u_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_3cpy8lp + +.Lc2u_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #24 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #8 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_3fupi +.Lc2u_3nowords: mov r3, r7, get_byte_3 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished +ENDPROC(__copy_to_user) + + .pushsection .fixup,"ax" + .align 0 +9001: ldmfd sp!, {r0, r4 - r7, pc} + .popsection + +/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); + * Purpose : copy a block from user memory to kernel memory + * Params : to - kernel memory + * : from - user memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ +.Lcfu_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 +USER( TUSER( ldrb) r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + sub r2, r2, ip + b .Lcfu_dest_aligned + +ENTRY(__copy_from_user) + stmfd sp!, {r0, r2, r4 - r7, lr} + cmp r2, #4 + blt .Lcfu_not_enough + ands ip, r0, #3 + bne .Lcfu_dest_not_aligned +.Lcfu_dest_aligned: + ands ip, r1, #3 + bne .Lcfu_src_not_aligned + +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lcfu_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_0nowords +USER( TUSER( ldr) r3, [r1], #4) + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .Lcfu_0rem8lp + +.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + ldmia r1!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #32 + stmia r0!, {r3 - r6} + bpl .Lcfu_0cpy8lp + +.Lcfu_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} @ Shouldnt fault + stmgeia r0!, {r3 - r6} + tst ip, #8 + ldmneia r1!, {r3 - r4} @ Shouldnt fault + stmneia r0!, {r3 - r4} + tst ip, #4 + TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_0fupi +.Lcfu_0nowords: teq ip, #0 + beq .Lcfu_finished +.Lcfu_nowords: cmp ip, #2 +USER( TUSER( ldrb) r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_not_enough: + movs ip, r2 + bne .Lcfu_nowords +.Lcfu_finished: mov r0, #0 + add sp, sp, #8 + ldmfd sp!, {r4 - r7, pc} + +.Lcfu_src_not_aligned: + bic r1, r1, #3 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + cmp ip, #2 + bgt .Lcfu_3fupi + beq .Lcfu_2fupi +.Lcfu_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_1nowords + mov r3, r7, pull #8 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #24 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_1rem8lp + +.Lcfu_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} @ Shouldnt fault + subs ip, ip, #16 + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + bpl .Lcfu_1cpy8lp + +.Lcfu_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #8 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #24 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_1fupi +.Lcfu_1nowords: mov r3, r7, get_byte_1 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, get_byte_2 + strgeb r3, [r0], #1 + movgt r3, r7, get_byte_3 + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_2nowords + mov r3, r7, pull #16 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #16 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_2rem8lp + + +.Lcfu_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} @ Shouldnt fault + subs ip, ip, #16 + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + bpl .Lcfu_2cpy8lp + +.Lcfu_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #16 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #16 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_2fupi +.Lcfu_2nowords: mov 
r3, r7, get_byte_2 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, get_byte_3 + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_3nowords + mov r3, r7, pull #24 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #8 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_3rem8lp + +.Lcfu_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .Lcfu_3cpy8lp + +.Lcfu_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #24 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #8 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_3fupi +.Lcfu_3nowords: mov r3, r7, get_byte_3 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished +ENDPROC(__copy_from_user) + + .pushsection .fixup,"ax" + .align 0 + /* + * We took an exception. r0 contains a pointer to + * the byte not copied. + */ +9001: ldr r2, [sp], #4 @ void *to + sub r2, r0, r2 @ bytes copied + ldr r1, [sp], #4 @ unsigned long count + subs r4, r1, r2 @ bytes left to copy + movne r1, r4 + blne __memzero + mov r0, r4 + ldmfd sp!, {r4 - r7, pc} + .popsection + diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 5de69f2fcca9..f6b9fc70161b 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -162,38 +162,6 @@ static void __init davinci_ntosd2_map_io(void) dm644x_init(); } -/* - I2C initialization -*/ -static struct davinci_i2c_platform_data ntosd2_i2c_pdata = { - .bus_freq = 20 /* kHz */, - .bus_delay = 100 /* usec */, -}; - -static struct i2c_board_info __initdata ntosd2_i2c_info[] = { -}; - -static int ntosd2_init_i2c(void) -{ - int status; - - davinci_init_i2c(&ntosd2_i2c_pdata); - status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type); - if (status == 0) { - status = gpio_direction_input(NTOSD2_MSP430_IRQ); - if (status == 0) { - status = gpio_to_irq(NTOSD2_MSP430_IRQ); - if (status > 0) { - ntosd2_i2c_info[0].irq = status; - i2c_register_board_info(1, - ntosd2_i2c_info, - ARRAY_SIZE(ntosd2_i2c_info)); - } - } - } - return status; -} - static struct davinci_mmc_config davinci_ntosd2_mmc_config = { .wires = 4, .version = MMC_CTLR_VERSION_1 @@ -218,7 +186,6 @@ static __init void davinci_ntosd2_init(void) { struct clk *aemif_clk; struct davinci_soc_info *soc_info = &davinci_soc_info; - int status; aemif_clk = clk_get(NULL, "aemif"); clk_enable(aemif_clk); @@ -242,12 +209,6 @@ static __init void davinci_ntosd2_init(void) platform_add_devices(davinci_ntosd2_devices, ARRAY_SIZE(davinci_ntosd2_devices)); - /* Initialize I2C interface specific for this board */ - status = ntosd2_init_i2c(); - if (status < 0) - 
pr_warning("davinci_ntosd2_init: msp430 irq setup failed:" - " %d\n", status); - davinci_serial_init(&uart_config); dm644x_init_asp(&dm644x_ntosd2_snd_data); diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index d1624a315c9a..783eab6845c4 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -546,6 +546,7 @@ static struct lcd_ctrl_config lcd_cfg = { .sync_edge = 0, .sync_ctrl = 1, .raster_order = 0, + .fifo_th = 6, }; struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = { diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index f07fd16e0c9b..9bc97a5baaa8 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -20,22 +20,6 @@ #include <mach/bridge-regs.h> #include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - int irqoff; - BUG_ON(irq < IRQ_DOVE_GPIO_0_7 || irq > IRQ_DOVE_HIGH_GPIO); - - irqoff = irq <= IRQ_DOVE_GPIO_16_23 ? irq - IRQ_DOVE_GPIO_0_7 : - 3 + irq - IRQ_DOVE_GPIO_24_31; - - orion_gpio_irq_handler(irqoff << 3); - if (irq == IRQ_DOVE_HIGH_GPIO) { - orion_gpio_irq_handler(40); - orion_gpio_irq_handler(48); - orion_gpio_irq_handler(56); - } -} - static void pmu_irq_mask(struct irq_data *d) { int pin = irq_to_pmu(d->irq); @@ -90,6 +74,27 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) } } +static int __initdata gpio0_irqs[4] = { + IRQ_DOVE_GPIO_0_7, + IRQ_DOVE_GPIO_8_15, + IRQ_DOVE_GPIO_16_23, + IRQ_DOVE_GPIO_24_31, +}; + +static int __initdata gpio1_irqs[4] = { + IRQ_DOVE_HIGH_GPIO, + 0, + 0, + 0, +}; + +static int __initdata gpio2_irqs[4] = { + 0, + 0, + 0, + 0, +}; + void __init dove_init_irq(void) { int i; @@ -100,19 +105,14 @@ void __init dove_init_irq(void) /* * Initialize gpiolib for GPIOs 0-71. 
*/ - orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0, - IRQ_DOVE_GPIO_START); - irq_set_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler); - - orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0, - IRQ_DOVE_GPIO_START + 32); - irq_set_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler); - - orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0, - IRQ_DOVE_GPIO_START + 64); + orion_gpio_init(NULL, 0, 32, (void __iomem *)DOVE_GPIO_LO_VIRT_BASE, 0, + IRQ_DOVE_GPIO_START, gpio0_irqs); + + orion_gpio_init(NULL, 32, 32, (void __iomem *)DOVE_GPIO_HI_VIRT_BASE, 0, + IRQ_DOVE_GPIO_START + 32, gpio1_irqs); + + orion_gpio_init(NULL, 64, 8, (void __iomem *)DOVE_GPIO2_VIRT_BASE, 0, + IRQ_DOVE_GPIO_START + 64, gpio2_irqs); /* * Mask and clear PMU interrupts diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 373c3c00d24c..c0bc83a7663e 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -115,7 +115,7 @@ static __init int exynos_pm_dt_parse_domains(void) } #endif /* CONFIG_OF */ -static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, +static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, struct exynos_pm_domain *pd) { if (pdev->dev.bus) { diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c index 7aa6313fb167..f69ca4680049 100644 --- a/arch/arm/mach-imx/clk-imx27.c +++ b/arch/arm/mach-imx/clk-imx27.c @@ -223,7 +223,7 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0"); - clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0"); + clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0"); clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); @@ -250,8 +250,10 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1"); clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0"); clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad"); - clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma"); - clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma"); + clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0"); + clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0"); clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL); clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL); clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL); diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c index 8e19e70f90f9..1253af2d9971 100644 --- a/arch/arm/mach-imx/clk-imx31.c +++ b/arch/arm/mach-imx/clk-imx31.c @@ -130,7 +130,7 @@ int __init mx31_clocks_init(unsigned long fref) clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0"); clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core"); clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); - clk_register_clkdev(clk[kpp_gate], "kpp", NULL); + clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad"); 
clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0"); clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0"); diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c index f6086693ebd2..4bdcaa97bd98 100644 --- a/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/arch/arm/mach-imx/clk-imx51-imx53.c @@ -303,6 +303,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, clk_prepare_enable(clk[aips_tz2]); /* fec */ clk_prepare_enable(clk[spba]); clk_prepare_enable(clk[emi_fast_gate]); /* fec */ + clk_prepare_enable(clk[emi_slow_gate]); /* eim */ clk_prepare_enable(clk[tmax1]); clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */ clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */ diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index ebf680bebdf2..3fa6c51390da 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> +#include <linux/export.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/irq.h> diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 7b1055c8e0b9..3b2267529f5e 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -456,7 +456,7 @@ static void __init ap_init_timer(void) clk = clk_get_sys("ap_timer", NULL); BUG_ON(IS_ERR(clk)); - clk_enable(clk); + clk_prepare_enable(clk); rate = clk_get_rate(clk); writel(0, TIMER0_VA_BASE + TIMER_CTRL); diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig index 199764fe0fb0..ca5c15a4e626 100644 --- a/arch/arm/mach-kirkwood/Kconfig +++ b/arch/arm/mach-kirkwood/Kconfig @@ -80,6 +80,35 @@ config MACH_IB62X0_DT RaidSonic IB-NAS6210 & IB-NAS6220 devices, using Flattened Device Tree. +config MACH_TS219_DT + bool "Device Tree for QNAP TS-11X, TS-21X NAS" + select ARCH_KIRKWOOD_DT + select ARM_APPENDED_DTB + select ARM_ATAG_DTB_COMPAT + help + Say 'Y' here if you want your kernel to support the QNAP + TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and + TS-219P+ Turbo NAS devices using Flattened Device Tree. + There are two different Device Tree descriptions, depending + on whether the board uses the MV6281 or the MV6282. If you + have the wrong one, the buttons will not + work. + +config MACH_GOFLEXNET_DT + bool "Seagate GoFlex Net (Flattened Device Tree)" + select ARCH_KIRKWOOD_DT + help + Say 'Y' here if you want your kernel to support the + Seagate GoFlex Net (Flattened Device Tree). + +config MACH_LSXL_DT + bool "Buffalo Linkstation LS-XHL, LS-CHLv2 (Flattened Device Tree)" + select ARCH_KIRKWOOD_DT + help + Say 'Y' here if you want your kernel to support the + Buffalo Linkstation LS-XHL & LS-CHLv2 devices, using + Flattened Device Tree.
+ config MACH_TS219 bool "QNAP TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and TS-219P+ Turbo NAS" help diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile index d2b05907b10e..055c85a1cc46 100644 --- a/arch/arm/mach-kirkwood/Makefile +++ b/arch/arm/mach-kirkwood/Makefile @@ -25,3 +25,6 @@ obj-$(CONFIG_MACH_DREAMPLUG_DT) += board-dreamplug.o obj-$(CONFIG_MACH_ICONNECT_DT) += board-iconnect.o obj-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += board-dnskw.o obj-$(CONFIG_MACH_IB62X0_DT) += board-ib62x0.o +obj-$(CONFIG_MACH_TS219_DT) += board-ts219.o tsx1x-common.o +obj-$(CONFIG_MACH_GOFLEXNET_DT) += board-goflexnet.o +obj-$(CONFIG_MACH_LSXL_DT) += board-lsxl.o diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot index 02edbdf5b065..a5717558ee89 100644 --- a/arch/arm/mach-kirkwood/Makefile.boot +++ b/arch/arm/mach-kirkwood/Makefile.boot @@ -7,3 +7,7 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb +dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb +dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb diff --git a/arch/arm/mach-kirkwood/board-dnskw.c b/arch/arm/mach-kirkwood/board-dnskw.c index 58c2d68f9443..4ab35065a144 100644 --- a/arch/arm/mach-kirkwood/board-dnskw.c +++ b/arch/arm/mach-kirkwood/board-dnskw.c @@ -14,13 +14,11 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> -#include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/of.h> #include <linux/gpio.h> #include <linux/input.h> -#include <linux/gpio_keys.h> #include <linux/gpio-fan.h> #include <linux/leds.h> #include <asm/mach-types.h> @@ -35,10 +33,6 @@ static struct mv643xx_eth_platform_data dnskw_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; -static struct mv_sata_platform_data dnskw_sata_data = { - .n_ports = 2, -}; - static unsigned int dnskw_mpp_config[] __initdata = { MPP13_UART1_TXD, /* Custom ... */ MPP14_UART1_RXD, /* ... 
Controller (DNS-320 only) */ @@ -73,132 +67,6 @@ static unsigned int dnskw_mpp_config[] __initdata = { 0 }; -static struct gpio_led dns325_led_pins[] = { - { - .name = "dns325:white:power", - .gpio = 26, - .active_low = 1, - .default_trigger = "default-on", - }, - { - .name = "dns325:white:usb", - .gpio = 43, - .active_low = 1, - }, - { - .name = "dns325:red:l_hdd", - .gpio = 28, - .active_low = 1, - }, - { - .name = "dns325:red:r_hdd", - .gpio = 27, - .active_low = 1, - }, - { - .name = "dns325:red:usb", - .gpio = 29, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dns325_led_data = { - .num_leds = ARRAY_SIZE(dns325_led_pins), - .leds = dns325_led_pins, -}; - -static struct platform_device dns325_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dns325_led_data, - }, -}; - -static struct gpio_led dns320_led_pins[] = { - { - .name = "dns320:blue:power", - .gpio = 26, - .active_low = 1, - .default_trigger = "default-on", - }, - { - .name = "dns320:blue:usb", - .gpio = 43, - .active_low = 1, - }, - { - .name = "dns320:orange:l_hdd", - .gpio = 28, - .active_low = 1, - }, - { - .name = "dns320:orange:r_hdd", - .gpio = 27, - .active_low = 1, - }, - { - .name = "dns320:orange:usb", - .gpio = 35, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dns320_led_data = { - .num_leds = ARRAY_SIZE(dns320_led_pins), - .leds = dns320_led_pins, -}; - -static struct platform_device dns320_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dns320_led_data, - }, -}; - -static struct i2c_board_info dns325_i2c_board_info[] __initdata = { - { - I2C_BOARD_INFO("lm75", 0x48), - }, - /* Something at 0x0c also */ -}; - -static struct gpio_keys_button dnskw_button_pins[] = { - { - .code = KEY_POWER, - .gpio = 34, - .desc = "Power button", - .active_low = 1, - }, - { - .code = KEY_EJECTCD, - .gpio = 47, - .desc = "USB unmount button", - .active_low = 1, - }, - { - .code = KEY_RESTART, - .gpio = 48, - .desc = "Reset button", - .active_low = 1, - }, -}; - -static struct gpio_keys_platform_data dnskw_button_data = { - .buttons = dnskw_button_pins, - .nbuttons = ARRAY_SIZE(dnskw_button_pins), -}; - -static struct platform_device dnskw_button_device = { - .name = "gpio-keys", - .id = -1, - .num_resources = 0, - .dev = { - .platform_data = &dnskw_button_data, - } -}; - /* Fan: ADDA AD045HB-G73 40mm 6000rpm@5v */ static struct gpio_fan_speed dnskw_fan_speed[] = { { 0, 0 }, @@ -245,20 +113,9 @@ void __init dnskw_init(void) kirkwood_ehci_init(); kirkwood_ge00_init(&dnskw_ge00_data); - kirkwood_sata_init(&dnskw_sata_data); - kirkwood_i2c_init(); - platform_device_register(&dnskw_button_device); platform_device_register(&dnskw_fan_device); - if (of_machine_is_compatible("dlink,dns-325")) { - i2c_register_board_info(0, dns325_i2c_board_info, - ARRAY_SIZE(dns325_i2c_board_info)); - platform_device_register(&dns325_led_device); - - } else if (of_machine_is_compatible("dlink,dns-320")) - platform_device_register(&dns320_led_device); - /* Register power-off GPIO. 
*/ if (gpio_request(36, "dnskw:power:off") == 0 && gpio_direction_output(36, 0) == 0) diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c index 55e357ab2923..aeb234d0d0e3 100644 --- a/arch/arm/mach-kirkwood/board-dreamplug.c +++ b/arch/arm/mach-kirkwood/board-dreamplug.c @@ -14,7 +14,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> -#include <linux/mtd/partitions.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/of.h> @@ -23,7 +22,6 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/gpio.h> -#include <linux/leds.h> #include <linux/mtd/physmap.h> #include <linux/spi/flash.h> #include <linux/spi/spi.h> @@ -36,42 +34,6 @@ #include "common.h" #include "mpp.h" -struct mtd_partition dreamplug_partitions[] = { - { - .name = "u-boot", - .size = SZ_512K, - .offset = 0, - }, - { - .name = "u-boot env", - .size = SZ_64K, - .offset = SZ_512K + SZ_512K, - }, - { - .name = "dtb", - .size = SZ_64K, - .offset = SZ_512K + SZ_512K + SZ_512K, - }, -}; - -static const struct flash_platform_data dreamplug_spi_slave_data = { - .type = "mx25l1606e", - .name = "spi_flash", - .parts = dreamplug_partitions, - .nr_parts = ARRAY_SIZE(dreamplug_partitions), -}; - -static struct spi_board_info __initdata dreamplug_spi_slave_info[] = { - { - .modalias = "m25p80", - .platform_data = &dreamplug_spi_slave_data, - .irq = -1, - .max_speed_hz = 50000000, - .bus_num = 0, - .chip_select = 0, - }, -}; - static struct mv643xx_eth_platform_data dreamplug_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; @@ -80,45 +42,10 @@ static struct mv643xx_eth_platform_data dreamplug_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(1), }; -static struct mv_sata_platform_data dreamplug_sata_data = { - .n_ports = 1, -}; - static struct mvsdio_platform_data dreamplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ }; -static struct gpio_led dreamplug_led_pins[] = { - { - .name = "dreamplug:blue:bluetooth", - .gpio = 47, - .active_low = 1, - }, - { - .name = "dreamplug:green:wifi", - .gpio = 48, - .active_low = 1, - }, - { - .name = "dreamplug:green:wifi_ap", - .gpio = 49, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dreamplug_led_data = { - .leds = dreamplug_led_pins, - .num_leds = ARRAY_SIZE(dreamplug_led_pins), -}; - -static struct platform_device dreamplug_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dreamplug_led_data, - } -}; - static unsigned int dreamplug_mpp_config[] __initdata = { MPP0_SPI_SCn, MPP1_SPI_MOSI, @@ -137,15 +64,8 @@ void __init dreamplug_init(void) */ kirkwood_mpp_conf(dreamplug_mpp_config); - spi_register_board_info(dreamplug_spi_slave_info, - ARRAY_SIZE(dreamplug_spi_slave_info)); - kirkwood_spi_init(); - kirkwood_ehci_init(); kirkwood_ge00_init(&dreamplug_ge00_data); kirkwood_ge01_init(&dreamplug_ge01_data); - kirkwood_sata_init(&dreamplug_sata_data); kirkwood_sdio_init(&dreamplug_mvsdio_data); - - platform_device_register(&dreamplug_leds); } diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index edc3f8a9d45e..e4eb450de301 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -18,6 +18,7 @@ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/bridge-regs.h> +#include <plat/irq.h> #include "common.h" static struct of_device_id kirkwood_dt_match_table[] __initdata = { @@ -25,6 +26,16 @@ static struct of_device_id 
kirkwood_dt_match_table[] __initdata = { { } }; +struct of_dev_auxdata kirkwood_auxdata_lookup[] __initdata = { + OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL), + OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0", + NULL), + OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL), + OF_DEV_AUXDATA("marvell,orion-sata", 0xf1080000, "sata_mv.0", NULL), + OF_DEV_AUXDATA("marvell,orion-nand", 0xf4000000, "orion_nand", NULL), + {}, +}; + static void __init kirkwood_dt_init(void) { pr_info("Kirkwood: %s, TCLK=%d.\n", kirkwood_id(), kirkwood_tclk); @@ -47,7 +58,6 @@ static void __init kirkwood_dt_init(void) kirkwood_clk_init(); /* internal devices that every board has */ - kirkwood_wdt_init(); kirkwood_xor0_init(); kirkwood_xor1_init(); kirkwood_crypto_init(); @@ -68,7 +78,17 @@ static void __init kirkwood_dt_init(void) if (of_machine_is_compatible("raidsonic,ib-nas62x0")) ib62x0_init(); - of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL); + if (of_machine_is_compatible("qnap,ts219")) + qnap_dt_ts219_init(); + + if (of_machine_is_compatible("seagate,goflexnet")) + goflexnet_init(); + + if (of_machine_is_compatible("buffalo,lsxl")) + lsxl_init(); + + of_platform_populate(NULL, kirkwood_dt_match_table, + kirkwood_auxdata_lookup, NULL); } static const char *kirkwood_dt_board_compat[] = { @@ -77,6 +97,9 @@ static const char *kirkwood_dt_board_compat[] = { "dlink,dns-325", "iom,iconnect", "raidsonic,ib-nas62x0", + "qnap,ts219", + "seagate,goflexnet", + "buffalo,lsxl", NULL }; @@ -84,7 +107,7 @@ DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)") /* Maintainer: Jason Cooper <jason@lakedaemon.net> */ .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, - .init_irq = kirkwood_init_irq, + .init_irq = orion_dt_init_irq, .timer = &kirkwood_timer, .init_machine = kirkwood_dt_init, .restart = kirkwood_restart, diff --git a/arch/arm/mach-kirkwood/board-goflexnet.c b/arch/arm/mach-kirkwood/board-goflexnet.c new file mode 100644 index 000000000000..413e2c8ef5fe --- /dev/null +++ b/arch/arm/mach-kirkwood/board-goflexnet.c @@ -0,0 +1,71 @@ +/* + * Copyright 2012 (C), Jason Cooper <jason@lakedaemon.net> + * + * arch/arm/mach-kirkwood/board-goflexnet.c + * + * Seagate GoFlex Net Board Init for drivers not converted to + * flattened device tree yet. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Copied and modified for Seagate GoFlex Net support by + * Joshua Coombs <josh.coombs@gmail.com> based on ArchLinux ARM's + * GoFlex kernel patches. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/ata_platform.h> +#include <linux/mv643xx_eth.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/gpio.h> +#include <asm/mach-types.h> +#include <asm/mach/arch.h> +#include <asm/mach/map.h> +#include <mach/kirkwood.h> +#include <mach/bridge-regs.h> +#include <plat/mvsdio.h> +#include "common.h" +#include "mpp.h" + +static struct mv643xx_eth_platform_data goflexnet_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(0), +}; + +static unsigned int goflexnet_mpp_config[] __initdata = { + MPP29_GPIO, /* USB Power Enable */ + MPP47_GPIO, /* LED Orange */ + MPP46_GPIO, /* LED Green */ + MPP45_GPIO, /* LED Left Capacity 3 */ + MPP44_GPIO, /* LED Left Capacity 2 */ + MPP43_GPIO, /* LED Left Capacity 1 */ + MPP42_GPIO, /* LED Left Capacity 0 */ + MPP41_GPIO, /* LED Right Capacity 3 */ + MPP40_GPIO, /* LED Right Capacity 2 */ + MPP39_GPIO, /* LED Right Capacity 1 */ + MPP38_GPIO, /* LED Right Capacity 0 */ + 0 +}; + +void __init goflexnet_init(void) +{ + /* + * Basic setup. Needs to be called early. + */ + kirkwood_mpp_conf(goflexnet_mpp_config); + + if (gpio_request(29, "USB Power Enable") != 0 || + gpio_direction_output(29, 1) != 0) + pr_err("can't setup GPIO 29 (USB Power Enable)\n"); + kirkwood_ehci_init(); + + kirkwood_ge00_init(&goflexnet_ge00_data); +} diff --git a/arch/arm/mach-kirkwood/board-ib62x0.c b/arch/arm/mach-kirkwood/board-ib62x0.c index eddf1df8891f..cfc47f80e734 100644 --- a/arch/arm/mach-kirkwood/board-ib62x0.c +++ b/arch/arm/mach-kirkwood/board-ib62x0.c @@ -18,9 +18,7 @@ #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/gpio.h> -#include <linux/gpio_keys.h> #include <linux/input.h> -#include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/kirkwood.h> @@ -33,10 +31,6 @@ static struct mv643xx_eth_platform_data ib62x0_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; -static struct mv_sata_platform_data ib62x0_sata_data = { - .n_ports = 2, -}; - static unsigned int ib62x0_mpp_config[] __initdata = { MPP0_NF_IO2, MPP1_NF_IO3, @@ -55,69 +49,6 @@ static unsigned int ib62x0_mpp_config[] __initdata = { 0 }; -static struct gpio_led ib62x0_led_pins[] = { - { - .name = "ib62x0:green:os", - .default_trigger = "default-on", - .gpio = 25, - .active_low = 0, - }, - { - .name = "ib62x0:red:os", - .default_trigger = "none", - .gpio = 22, - .active_low = 0, - }, - { - .name = "ib62x0:red:usb_copy", - .default_trigger = "none", - .gpio = 27, - .active_low = 0, - }, -}; - -static struct gpio_led_platform_data ib62x0_led_data = { - .leds = ib62x0_led_pins, - .num_leds = ARRAY_SIZE(ib62x0_led_pins), -}; - -static struct platform_device ib62x0_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &ib62x0_led_data, - } -}; - -static struct gpio_keys_button ib62x0_button_pins[] = { - { - .code = KEY_COPY, - .gpio = 29, - .desc = "USB Copy", - .active_low = 1, - }, - { - .code = KEY_RESTART, - .gpio = 28, - .desc = "Reset", - .active_low = 1, - }, -}; - -static struct gpio_keys_platform_data ib62x0_button_data = { - .buttons = ib62x0_button_pins, - .nbuttons = ARRAY_SIZE(ib62x0_button_pins), -}; - -static struct platform_device ib62x0_button_device = { - .name = "gpio-keys", - .id = -1, - .num_resources = 0, - .dev = { - .platform_data = &ib62x0_button_data, - } -}; - static void 
ib62x0_power_off(void) { gpio_set_value(IB62X0_GPIO_POWER_OFF, 1); @@ -132,9 +63,6 @@ void __init ib62x0_init(void) kirkwood_ehci_init(); kirkwood_ge00_init(&ib62x0_ge00_data); - kirkwood_sata_init(&ib62x0_sata_data); - platform_device_register(&ib62x0_led_device); - platform_device_register(&ib62x0_button_device); if (gpio_request(IB62X0_GPIO_POWER_OFF, "ib62x0:power:off") == 0 && gpio_direction_output(IB62X0_GPIO_POWER_OFF, 0) == 0) pm_power_off = ib62x0_power_off; diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c index b0d3cc49269d..d7a9198ed300 100644 --- a/arch/arm/mach-kirkwood/board-iconnect.c +++ b/arch/arm/mach-kirkwood/board-iconnect.c @@ -19,8 +19,6 @@ #include <linux/mtd/partitions.h> #include <linux/mv643xx_eth.h> #include <linux/gpio.h> -#include <linux/leds.h> -#include <linux/i2c.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <asm/mach/arch.h> @@ -32,50 +30,6 @@ static struct mv643xx_eth_platform_data iconnect_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(11), }; -static struct gpio_led iconnect_led_pins[] = { - { - .name = "led_level", - .gpio = 41, - .default_trigger = "default-on", - }, { - .name = "power:blue", - .gpio = 42, - .default_trigger = "timer", - }, { - .name = "power:red", - .gpio = 43, - }, { - .name = "usb1:blue", - .gpio = 44, - }, { - .name = "usb2:blue", - .gpio = 45, - }, { - .name = "usb3:blue", - .gpio = 46, - }, { - .name = "usb4:blue", - .gpio = 47, - }, { - .name = "otb:blue", - .gpio = 48, - }, -}; - -static struct gpio_led_platform_data iconnect_led_data = { - .leds = iconnect_led_pins, - .num_leds = ARRAY_SIZE(iconnect_led_pins), - .gpio_blink_set = orion_gpio_led_blink_set, -}; - -static struct platform_device iconnect_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &iconnect_led_data, - } -}; - static unsigned int iconnect_mpp_config[] __initdata = { MPP12_GPIO, MPP35_GPIO, @@ -90,12 +44,6 @@ static unsigned int iconnect_mpp_config[] __initdata = { 0 }; -static struct i2c_board_info __initdata iconnect_board_info[] = { - { - I2C_BOARD_INFO("lm63", 0x4c), - }, -}; - static struct mtd_partition iconnect_nand_parts[] = { { .name = "flash", @@ -142,15 +90,11 @@ void __init iconnect_init(void) { kirkwood_mpp_conf(iconnect_mpp_config); kirkwood_nand_init(ARRAY_AND_SIZE(iconnect_nand_parts), 25); - kirkwood_i2c_init(); - i2c_register_board_info(0, iconnect_board_info, - ARRAY_SIZE(iconnect_board_info)); kirkwood_ehci_init(); kirkwood_ge00_init(&iconnect_ge00_data); platform_device_register(&iconnect_button_device); - platform_device_register(&iconnect_leds); } static int __init iconnect_pci_init(void) diff --git a/arch/arm/mach-kirkwood/board-lsxl.c b/arch/arm/mach-kirkwood/board-lsxl.c new file mode 100644 index 000000000000..83d8975592f8 --- /dev/null +++ b/arch/arm/mach-kirkwood/board-lsxl.c @@ -0,0 +1,135 @@ +/* + * Copyright 2012 (C), Michael Walle <michael@walle.cc> + * + * arch/arm/mach-kirkwood/board-lsxl.c + * + * Buffalo Linkstation LS-XHL and LS-CHLv2 init for drivers not + * converted to flattened device tree yet. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/mtd/partitions.h> +#include <linux/ata_platform.h> +#include <linux/spi/flash.h> +#include <linux/spi/spi.h> +#include <linux/mv643xx_eth.h> +#include <linux/gpio.h> +#include <linux/gpio-fan.h> +#include <linux/input.h> +#include <asm/mach-types.h> +#include <asm/mach/arch.h> +#include <mach/kirkwood.h> +#include "common.h" +#include "mpp.h" + +static struct mv643xx_eth_platform_data lsxl_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(0), +}; + +static struct mv643xx_eth_platform_data lsxl_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), +}; + +static unsigned int lsxl_mpp_config[] __initdata = { + MPP10_GPO, /* HDD Power Enable */ + MPP11_GPIO, /* USB Vbus Enable */ + MPP18_GPO, /* FAN High Enable# */ + MPP19_GPO, /* FAN Low Enable# */ + MPP36_GPIO, /* Function Blue LED */ + MPP37_GPIO, /* Alarm LED */ + MPP38_GPIO, /* Info LED */ + MPP39_GPIO, /* Power LED */ + MPP40_GPIO, /* Fan Lock */ + MPP41_GPIO, /* Function Button */ + MPP42_GPIO, /* Power Switch */ + MPP43_GPIO, /* Power Auto Switch */ + MPP48_GPIO, /* Function Red LED */ + 0 +}; + +#define LSXL_GPIO_FAN_HIGH 18 +#define LSXL_GPIO_FAN_LOW 19 +#define LSXL_GPIO_FAN_LOCK 40 + +static struct gpio_fan_alarm lsxl_alarm = { + .gpio = LSXL_GPIO_FAN_LOCK, +}; + +static struct gpio_fan_speed lsxl_speeds[] = { + { + .rpm = 0, + .ctrl_val = 3, + }, { + .rpm = 1500, + .ctrl_val = 1, + }, { + .rpm = 3250, + .ctrl_val = 2, + }, { + .rpm = 5000, + .ctrl_val = 0, + } +}; + +static int lsxl_gpio_list[] = { + LSXL_GPIO_FAN_HIGH, LSXL_GPIO_FAN_LOW, +}; + +static struct gpio_fan_platform_data lsxl_fan_data = { + .num_ctrl = ARRAY_SIZE(lsxl_gpio_list), + .ctrl = lsxl_gpio_list, + .alarm = &lsxl_alarm, + .num_speed = ARRAY_SIZE(lsxl_speeds), + .speed = lsxl_speeds, +}; + +static struct platform_device lsxl_fan_device = { + .name = "gpio-fan", + .id = -1, + .num_resources = 0, + .dev = { + .platform_data = &lsxl_fan_data, + }, +}; + +/* + * On the LS-XHL/LS-CHLv2, the shutdown process is as follows: + * - Userland monitors key events until the power switch goes to off position + * - The board reboots + * - U-boot starts and goes into an idle mode waiting for the user + * to move the switch to ON position + * + */ +static void lsxl_power_off(void) +{ + kirkwood_restart('h', NULL); +} + +#define LSXL_GPIO_HDD_POWER 10 +#define LSXL_GPIO_USB_POWER 11 + +void __init lsxl_init(void) +{ + /* + * Basic setup. Needs to be called early. 
+ */ + kirkwood_mpp_conf(lsxl_mpp_config); + + /* usb and sata power on */ + gpio_set_value(LSXL_GPIO_USB_POWER, 1); + gpio_set_value(LSXL_GPIO_HDD_POWER, 1); + + kirkwood_ehci_init(); + kirkwood_ge00_init(&lsxl_ge00_data); + kirkwood_ge01_init(&lsxl_ge01_data); + platform_device_register(&lsxl_fan_device); + + /* register power-off method */ + pm_power_off = lsxl_power_off; +} diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c new file mode 100644 index 000000000000..1750e68506c1 --- /dev/null +++ b/arch/arm/mach-kirkwood/board-ts219.c @@ -0,0 +1,82 @@ +/* + * + * QNAP TS-11x/TS-21x Turbo NAS Board Setup via DT + * + * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch> + * + * Based on the board file ts219-setup.c: + * + * Copyright (C) 2009 Martin Michlmayr <tbm@cyrius.com> + * Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/mv643xx_eth.h> +#include <linux/ata_platform.h> +#include <linux/gpio_keys.h> +#include <linux/input.h> +#include <asm/mach-types.h> +#include <asm/mach/arch.h> +#include <mach/kirkwood.h> +#include "common.h" +#include "mpp.h" +#include "tsx1x-common.h" + +static struct mv643xx_eth_platform_data qnap_ts219_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), +}; + +static unsigned int qnap_ts219_mpp_config[] __initdata = { + MPP0_SPI_SCn, + MPP1_SPI_MOSI, + MPP2_SPI_SCK, + MPP3_SPI_MISO, + MPP4_SATA1_ACTn, + MPP5_SATA0_ACTn, + MPP8_TW0_SDA, + MPP9_TW0_SCK, + MPP10_UART0_TXD, + MPP11_UART0_RXD, + MPP13_UART1_TXD, /* PIC controller */ + MPP14_UART1_RXD, /* PIC controller */ + MPP15_GPIO, /* USB Copy button (on devices with 88F6281) */ + MPP16_GPIO, /* Reset button (on devices with 88F6281) */ + MPP36_GPIO, /* RAM: 0: 256 MB, 1: 512 MB */ + MPP37_GPIO, /* Reset button (on devices with 88F6282) */ + MPP43_GPIO, /* USB Copy button (on devices with 88F6282) */ + MPP44_GPIO, /* Board ID: 0: TS-11x, 1: TS-21x */ + 0 +}; + +void __init qnap_dt_ts219_init(void) +{ + u32 dev, rev; + + kirkwood_mpp_conf(qnap_ts219_mpp_config); + + kirkwood_pcie_id(&dev, &rev); + if (dev == MV88F6282_DEV_ID) + qnap_ts219_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); + + kirkwood_ge00_init(&qnap_ts219_ge00_data); + kirkwood_ehci_init(); + + pm_power_off = qnap_tsx1x_power_off; +} + +/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? 
*/ +static int __init ts219_pci_init(void) +{ + if (machine_is_ts219()) + kirkwood_pcie_init(KW_PCIE0); + + return 0; +} +subsys_initcall(ts219_pci_init); diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index c9201539ffbd..c4b64adcbfce 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -17,6 +17,7 @@ #include <linux/dma-mapping.h> #include <linux/clk-provider.h> #include <linux/spinlock.h> +#include <linux/mv643xx_i2c.h> #include <net/dsa.h> #include <asm/page.h> #include <asm/timex.h> @@ -276,6 +277,7 @@ void __init kirkwood_clk_init(void) orion_clkdev_add("0", "pcie", pex0); orion_clkdev_add("1", "pcie", pex1); orion_clkdev_add(NULL, "kirkwood-i2s", audio); + orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", runit); /* Marvell says runit is used by SPI, UART, NAND, TWSI, ..., * so should never be gated. diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h index 9248fa2c165b..304dd1abfdca 100644 --- a/arch/arm/mach-kirkwood/common.h +++ b/arch/arm/mach-kirkwood/common.h @@ -58,6 +58,11 @@ void dreamplug_init(void); #else static inline void dreamplug_init(void) {}; #endif +#ifdef CONFIG_MACH_TS219_DT +void qnap_dt_ts219_init(void); +#else +static inline void qnap_dt_ts219_init(void) {}; +#endif #ifdef CONFIG_MACH_DLINK_KIRKWOOD_DT void dnskw_init(void); @@ -77,6 +82,18 @@ void ib62x0_init(void); static inline void ib62x0_init(void) {}; #endif +#ifdef CONFIG_MACH_GOFLEXNET_DT +void goflexnet_init(void); +#else +static inline void goflexnet_init(void) {}; +#endif + +#ifdef CONFIG_MACH_LSXL_DT +void lsxl_init(void); +#else +static inline void lsxl_init(void) {}; +#endif + /* early init functions not converted to fdt yet */ char *kirkwood_id(void); void kirkwood_l2_init(void); diff --git a/arch/arm/mach-kirkwood/irq.c b/arch/arm/mach-kirkwood/irq.c index c4c68e5b94f1..720063ffa19d 100644 --- a/arch/arm/mach-kirkwood/irq.c +++ b/arch/arm/mach-kirkwood/irq.c @@ -9,20 +9,23 @@ */ #include <linux/gpio.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/irq.h> -#include <linux/io.h> #include <mach/bridge-regs.h> #include <plat/irq.h> -#include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - BUG_ON(irq < IRQ_KIRKWOOD_GPIO_LOW_0_7); - BUG_ON(irq > IRQ_KIRKWOOD_GPIO_HIGH_16_23); +static int __initdata gpio0_irqs[4] = { + IRQ_KIRKWOOD_GPIO_LOW_0_7, + IRQ_KIRKWOOD_GPIO_LOW_8_15, + IRQ_KIRKWOOD_GPIO_LOW_16_23, + IRQ_KIRKWOOD_GPIO_LOW_24_31, +}; - orion_gpio_irq_handler((irq - IRQ_KIRKWOOD_GPIO_LOW_0_7) << 3); -} +static int __initdata gpio1_irqs[4] = { + IRQ_KIRKWOOD_GPIO_HIGH_0_7, + IRQ_KIRKWOOD_GPIO_HIGH_8_15, + IRQ_KIRKWOOD_GPIO_HIGH_16_23, + 0, +}; void __init kirkwood_init_irq(void) { @@ -32,17 +35,8 @@ void __init kirkwood_init_irq(void) /* * Initialize gpiolib for GPIOs 0-49. 
*/ - orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0, - IRQ_KIRKWOOD_GPIO_START); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler); - - orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0, - IRQ_KIRKWOOD_GPIO_START + 32); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, - gpio_irq_handler); + orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_LOW_VIRT_BASE, 0, + IRQ_KIRKWOOD_GPIO_START, gpio0_irqs); + orion_gpio_init(NULL, 32, 18, (void __iomem *)GPIO_HIGH_VIRT_BASE, 0, + IRQ_KIRKWOOD_GPIO_START + 32, gpio1_irqs); } diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index f516e74ce0d5..5c3d61ee729a 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -14,6 +14,7 @@ #include <asm/mach/arch.h> #include <asm/mach-types.h> +#include <mach/irqs.h> #include <mach/pxa168.h> #include <mach/mfp-pxa168.h> diff --git a/arch/arm/mach-mv78xx0/irq.c b/arch/arm/mach-mv78xx0/irq.c index e421b701663b..eff9a750bbe2 100644 --- a/arch/arm/mach-mv78xx0/irq.c +++ b/arch/arm/mach-mv78xx0/irq.c @@ -9,19 +9,17 @@ */ #include <linux/gpio.h> #include <linux/kernel.h> -#include <linux/init.h> -#include <linux/pci.h> #include <linux/irq.h> #include <mach/bridge-regs.h> #include <plat/irq.h> #include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - BUG_ON(irq < IRQ_MV78XX0_GPIO_0_7 || irq > IRQ_MV78XX0_GPIO_24_31); - - orion_gpio_irq_handler((irq - IRQ_MV78XX0_GPIO_0_7) << 3); -} +static int __initdata gpio0_irqs[4] = { + IRQ_MV78XX0_GPIO_0_7, + IRQ_MV78XX0_GPIO_8_15, + IRQ_MV78XX0_GPIO_16_23, + IRQ_MV78XX0_GPIO_24_31, +}; void __init mv78xx0_init_irq(void) { @@ -34,11 +32,7 @@ void __init mv78xx0_init_irq(void) * registers for core #1 are at an offset of 0x18 from those of * core #0.) */ - orion_gpio_init(0, 32, GPIO_VIRT_BASE, + orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_VIRT_BASE, mv78xx0_core_index() ? 0x18 : 0, - IRQ_MV78XX0_GPIO_START); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler); + IRQ_MV78XX0_GPIO_START, gpio0_irqs); } diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig index ccdf83b17cf1..9a8bbda195b2 100644 --- a/arch/arm/mach-mxs/Kconfig +++ b/arch/arm/mach-mxs/Kconfig @@ -2,9 +2,6 @@ if ARCH_MXS source "arch/arm/mach-mxs/devices/Kconfig" -config MXS_OCOTP - bool - config SOC_IMX23 bool select ARM_AMBA @@ -66,7 +63,6 @@ config MACH_MX28EVK select MXS_HAVE_PLATFORM_MXS_SAIF select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_RTC_STMP3XXX - select MXS_OCOTP help Include support for MX28EVK platform. This includes specific configurations for the board and its peripherals. 
@@ -94,7 +90,6 @@ config MODULE_M28 select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_MXS_MMC select MXS_HAVE_PLATFORM_MXSFB - select MXS_OCOTP config MODULE_APX4 bool @@ -106,7 +101,6 @@ config MODULE_APX4 select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_MXS_MMC select MXS_HAVE_PLATFORM_MXS_SAIF - select MXS_OCOTP config MACH_TX28 bool "Ka-Ro TX28 module" diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile index e41590ccb437..fed3695a1339 100644 --- a/arch/arm/mach-mxs/Makefile +++ b/arch/arm/mach-mxs/Makefile @@ -1,7 +1,6 @@ # Common support -obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o +obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o -obj-$(CONFIG_MXS_OCOTP) += ocotp.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c index da0e37d40823..e1362ce48497 100644 --- a/arch/arm/mach-omap1/board-h2-mmc.c +++ b/arch/arm/mach-omap1/board-h2-mmc.c @@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, .init = mmc_late_init, .cleanup = mmc_cleanup, - .dma_mask = 0xffffffff, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c index f8242aa9b763..c74daace8cd6 100644 --- a/arch/arm/mach-omap1/board-h3-mmc.c +++ b/arch/arm/mach-omap1/board-h3-mmc.c @@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on, */ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, - .dma_mask = 0xffffffff, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c index 4007a372481b..2c0ca8fc3380 100644 --- a/arch/arm/mach-omap1/board-nokia770.c +++ b/arch/arm/mach-omap1/board-nokia770.c @@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot) static struct omap_mmc_platform_data nokia770_mmc2_data = { .nr_slots = 1, - .dma_mask = 0xffffffff, .max_freq = 12000000, .slots[0] = { .set_power = nokia770_mmc_set_power, diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index dd0fbf76ac79..dd2db025f778 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -62,6 +62,7 @@ config ARCH_OMAP4 select PM_OPP if PM select USB_ARCH_HAS_EHCI if USB_SUPPORT select ARM_CPU_SUSPEND if PM + select ARCH_NEEDS_CPU_IDLE_COUPLED config SOC_OMAP5 bool "TI OMAP5" diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 2c5d0ed75285..677357ff61ac 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = { .cleanup = n8x0_mmc_cleanup, .shutdown = n8x0_mmc_shutdown, .max_freq = 24000000, - .dma_mask = 0xffffffff, .slots[0] = { .wires = 4, .set_power = n8x0_mmc_set_power, diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 02d15bbd4e35..ee05e193fc61 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -21,6 +21,7 @@ #include "common.h" #include "pm.h" #include "prm.h" +#include "clockdomain.h" /* Machine specific information */ struct omap4_idle_statedata { @@ -47,10 +48,14 @@ static struct omap4_idle_statedata omap4_idle_data[] = { }, }; -static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; +static struct powerdomain 
*mpu_pd, *cpu_pd[NR_CPUS]; +static struct clockdomain *cpu_clkdm[NR_CPUS]; + +static atomic_t abort_barrier; +static bool cpu_done[NR_CPUS]; /** - * omap4_enter_idle - Programs OMAP4 to enter the specified state + * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions * @dev: cpuidle device * @drv: cpuidle driver * @index: the index of state to be entered @@ -59,60 +64,84 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; * specified low power state selected by the governor. * Returns the amount of time spent in the low power state. */ -static int omap4_enter_idle(struct cpuidle_device *dev, +static int omap4_enter_idle_simple(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + local_fiq_disable(); + omap_do_wfi(); + local_fiq_enable(); + + return index; +} + +static int omap4_enter_idle_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct omap4_idle_statedata *cx = &omap4_idle_data[index]; - u32 cpu1_state; int cpu_id = smp_processor_id(); local_fiq_disable(); /* - * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state. + * CPU0 has to wait and stay ON until CPU1 is OFF state. * This is necessary to honour hardware recommondation * of triggeing all the possible low power modes once CPU1 is * out of coherency and in OFF mode. - * Update dev->last_state so that governor stats reflects right - * data. */ - cpu1_state = pwrdm_read_pwrst(cpu1_pd); - if (cpu1_state != PWRDM_POWER_OFF) { - index = drv->safe_state_index; - cx = &omap4_idle_data[index]; + if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) { + while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) { + cpu_relax(); + + /* + * CPU1 could have already entered & exited idle + * without hitting off because of a wakeup + * or a failed attempt to hit off mode. Check for + * that here, otherwise we could spin forever + * waiting for CPU1 off. + */ + if (cpu_done[1]) + goto fail; + + } } - if (index > 0) - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. */ - if (cx->cpu_state == PWRDM_POWER_OFF) - cpu_pm_enter(); - - pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); - omap_set_pwrdm_state(mpu_pd, cx->mpu_state); - - /* - * Call idle CPU cluster PM enter notifier chain - * to save GIC and wakeupgen context. - */ - if ((cx->mpu_state == PWRDM_POWER_RET) && - (cx->mpu_logic_state == PWRDM_POWER_OFF)) - cpu_cluster_pm_enter(); + cpu_pm_enter(); + + if (dev->cpu == 0) { + pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); + omap_set_pwrdm_state(mpu_pd, cx->mpu_state); + + /* + * Call idle CPU cluster PM enter notifier chain + * to save GIC and wakeupgen context. + */ + if ((cx->mpu_state == PWRDM_POWER_RET) && + (cx->mpu_logic_state == PWRDM_POWER_OFF)) + cpu_cluster_pm_enter(); + } omap4_enter_lowpower(dev->cpu, cx->cpu_state); + cpu_done[dev->cpu] = true; + + /* Wakeup CPU1 only if it is not offlined */ + if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) { + clkdm_wakeup(cpu_clkdm[1]); + clkdm_allow_idle(cpu_clkdm[1]); + } /* * Call idle CPU PM exit notifier chain to restore - * VFP and per CPU IRQ context. Only CPU0 state is - * considered since CPU1 is managed by CPU hotplug. + * VFP and per CPU IRQ context. 
*/ - if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF) - cpu_pm_exit(); + cpu_pm_exit(); /* * Call idle CPU cluster PM exit notifier chain @@ -121,8 +150,11 @@ static int omap4_enter_idle(struct cpuidle_device *dev, if (omap4_mpuss_read_prev_context_state()) cpu_cluster_pm_exit(); - if (index > 0) - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + +fail: + cpuidle_coupled_parallel_barrier(dev, &abort_barrier); + cpu_done[dev->cpu] = false; local_fiq_enable(); @@ -141,7 +173,7 @@ struct cpuidle_driver omap4_idle_driver = { .exit_latency = 2 + 2, .target_residency = 5, .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle, + .enter = omap4_enter_idle_simple, .name = "C1", .desc = "MPUSS ON" }, @@ -149,8 +181,8 @@ struct cpuidle_driver omap4_idle_driver = { /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ .exit_latency = 328 + 440, .target_residency = 960, - .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, + .enter = omap4_enter_idle_coupled, .name = "C2", .desc = "MPUSS CSWR", }, @@ -158,8 +190,8 @@ struct cpuidle_driver omap4_idle_driver = { /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ .exit_latency = 460 + 518, .target_residency = 1100, - .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, + .enter = omap4_enter_idle_coupled, .name = "C3", .desc = "MPUSS OSWR", }, @@ -168,6 +200,16 @@ struct cpuidle_driver omap4_idle_driver = { .safe_state_index = 0, }; +/* + * For each cpu, setup the broadcast timer because local timers + * stops for the states above C1. + */ +static void omap_setup_broadcast_timer(void *arg) +{ + int cpu = smp_processor_id(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); +} + /** * omap4_idle_init - Init routine for OMAP4 idle * @@ -180,19 +222,30 @@ int __init omap4_idle_init(void) unsigned int cpu_id = 0; mpu_pd = pwrdm_lookup("mpu_pwrdm"); - cpu0_pd = pwrdm_lookup("cpu0_pwrdm"); - cpu1_pd = pwrdm_lookup("cpu1_pwrdm"); - if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd)) + cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm"); + cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm"); + if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1])) return -ENODEV; - dev = &per_cpu(omap4_idle_dev, cpu_id); - dev->cpu = cpu_id; + cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm"); + cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm"); + if (!cpu_clkdm[0] || !cpu_clkdm[1]) + return -ENODEV; + + /* Configure the broadcast timer on each cpu */ + on_each_cpu(omap_setup_broadcast_timer, NULL, 1); + + for_each_cpu(cpu_id, cpu_online_mask) { + dev = &per_cpu(omap4_idle_dev, cpu_id); + dev->cpu = cpu_id; + dev->coupled_cpus = *cpu_online_mask; - cpuidle_register_driver(&omap4_idle_driver); + cpuidle_register_driver(&omap4_idle_driver); - if (cpuidle_register_device(dev)) { - pr_err("%s: CPUidle register device failed\n", __func__); - return -EIO; + if (cpuidle_register_device(dev)) { + pr_err("%s: CPUidle register failed\n", __func__); + return -EIO; + } } return 0; diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 5fb47a14f4ba..af1ed7d24a1f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -37,6 +37,7 @@ #define DISPC_CONTROL 0x0040 #define DISPC_CONTROL2 0x0238 +#define DISPC_CONTROL3 0x0848 #define DISPC_IRQSTATUS 0x0018 #define DSS_SYSCONFIG 0x10 @@ -52,6 +53,7 @@ #define EVSYNC_EVEN_IRQ_SHIFT 2 #define EVSYNC_ODD_IRQ_SHIFT 3 #define 
FRAMEDONE2_IRQ_SHIFT 22 +#define FRAMEDONE3_IRQ_SHIFT 30 #define FRAMEDONETV_IRQ_SHIFT 24 /* @@ -376,7 +378,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data) static void dispc_disable_outputs(void) { u32 v, irq_mask = 0; - bool lcd_en, digit_en, lcd2_en = false; + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; int i; struct omap_dss_dispc_dev_attr *da; struct omap_hwmod *oh; @@ -405,7 +407,13 @@ static void dispc_disable_outputs(void) lcd2_en = v & LCD_EN_MASK; } - if (!(lcd_en | digit_en | lcd2_en)) + /* store value of LCDENABLE for LCD3 */ + if (da->manager_count > 3) { + v = omap_hwmod_read(oh, DISPC_CONTROL3); + lcd3_en = v & LCD_EN_MASK; + } + + if (!(lcd_en | digit_en | lcd2_en | lcd3_en)) return; /* no managers currently enabled */ /* @@ -426,10 +434,12 @@ static void dispc_disable_outputs(void) if (lcd2_en) irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT; + if (lcd3_en) + irq_mask |= 1 << FRAMEDONE3_IRQ_SHIFT; /* * clear any previous FRAMEDONE, FRAMEDONETV, - * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts + * EVSYNC_EVEN/ODD, FRAMEDONE2 or FRAMEDONE3 interrupts */ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS); @@ -445,12 +455,19 @@ static void dispc_disable_outputs(void) omap_hwmod_write(v, oh, DISPC_CONTROL2); } + /* disable LCD3 manager */ + if (da->manager_count > 3) { + v = omap_hwmod_read(oh, DISPC_CONTROL3); + v &= ~LCD_EN_MASK; + omap_hwmod_write(v, oh, DISPC_CONTROL3); + } + i = 0; while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) != irq_mask) { i++; if (i > FRAMEDONE_IRQ_TIMEOUT) { - pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n"); + pr_err("didn't get FRAMEDONE1/2/3 or TV interrupt\n"); break; } mdelay(1); diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index be697d4e0843..a9675d8d1822 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, mmc->slots[0].caps = c->caps; mmc->slots[0].pm_caps = c->pm_caps; mmc->slots[0].internal_clock = !c->ext_clock; - mmc->dma_mask = 0xffffffff; mmc->max_freq = c->max_freq; if (cpu_is_omap44xx()) mmc->reg_offset = OMAP4_MMC_REG_OFFSET; diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 13d20c8a283d..2ff6d41ec6c6 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -130,6 +130,7 @@ static struct clock_event_device clockevent_gpt = { .name = "gp_timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, + .rating = 300, .set_next_event = omap2_gp_timer_set_next_event, .set_mode = omap2_gp_timer_set_mode, }; @@ -223,7 +224,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id, clockevent_delta2ns(3, &clockevent_gpt); /* Timer internal resynch latency. 
*/ - clockevent_gpt.cpumask = cpumask_of(0); + clockevent_gpt.cpumask = cpu_possible_mask; + clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev); clockevents_register_device(&clockevent_gpt); pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n", diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c index b1b45fff776e..17da7091d310 100644 --- a/arch/arm/mach-orion5x/irq.c +++ b/arch/arm/mach-orion5x/irq.c @@ -11,19 +11,16 @@ */ #include <linux/gpio.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/irq.h> -#include <linux/io.h> #include <mach/bridge-regs.h> #include <plat/irq.h> -#include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - BUG_ON(irq < IRQ_ORION5X_GPIO_0_7 || irq > IRQ_ORION5X_GPIO_24_31); - - orion_gpio_irq_handler((irq - IRQ_ORION5X_GPIO_0_7) << 3); -} +static int __initdata gpio0_irqs[4] = { + IRQ_ORION5X_GPIO_0_7, + IRQ_ORION5X_GPIO_8_15, + IRQ_ORION5X_GPIO_16_23, + IRQ_ORION5X_GPIO_24_31, +}; void __init orion5x_init_irq(void) { @@ -32,9 +29,6 @@ void __init orion5x_init_irq(void) /* * Initialize gpiolib for GPIOs 0-31. */ - orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START); - irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); + orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_VIRT_BASE, 0, + IRQ_ORION5X_GPIO_START, gpio0_irqs); } diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c index 0d024b1e916d..f224107de7bc 100644 --- a/arch/arm/mach-prima2/timer.c +++ b/arch/arm/mach-prima2/timer.c @@ -132,11 +132,11 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs) { int i; - for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++) + for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++) writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); - writel_relaxed(sirfsoc_timer_reg_val[i - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); - writel_relaxed(sirfsoc_timer_reg_val[i - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); + writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); + writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); } static struct clock_event_device sirfsoc_clockevent = { diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 5905ed130e94..d89d87ae144c 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = { static struct eeti_ts_platform_data eeti_ts_pdata = { .irq_active_high = 1, + .irq_gpio = GPIO_TOUCH_IRQ, }; static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = { .type = "eeti_ts", .addr = 0x0a, - .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ), .platform_data = &eeti_ts_pdata, }; diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index e24961109b70..d56b0f7f2b20 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -483,7 +483,7 @@ config MACH_NEO1973_GTA02 select I2C select POWER_SUPPLY select MACH_NEO1973 - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_USB_HOST help Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone @@ -493,7 +493,7 @@ 
config MACH_RX1950 select S3C24XX_DCLK select PM_H1940 if PM select I2C - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_NAND select S3C2410_IOTIMING if S3C2440_CPUFREQ select S3C2440_XTAL_16934400 diff --git a/arch/arm/mach-sa1100/leds-hackkit.c b/arch/arm/mach-sa1100/leds-hackkit.c index 6a2352436e62..f8e47235babe 100644 --- a/arch/arm/mach-sa1100/leds-hackkit.c +++ b/arch/arm/mach-sa1100/leds-hackkit.c @@ -10,6 +10,7 @@ * as cpu led, the green one is used as timer led. */ #include <linux/init.h> +#include <linux/io.h> #include <mach/hardware.h> #include <asm/leds.h> diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c index 0f882ecb7d81..6ec300549960 100644 --- a/arch/arm/mach-spear3xx/spear300.c +++ b/arch/arm/mach-spear3xx/spear300.c @@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 
0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c index bbcf4571d361..1d0e435b9045 100644 --- a/arch/arm/mach-spear3xx/spear310.c +++ b/arch/arm/mach-spear3xx/spear310.c @@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git 
a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c index 88d483bcd66a..fd823c624575 100644 --- a/arch/arm/mach-spear3xx/spear320.c +++ b/arch/arm/mach-spear3xx/spear320.c @@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, }; diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c index 66db5f13af84..98144baf8883 100644 --- a/arch/arm/mach-spear3xx/spear3xx.c +++ b/arch/arm/mach-spear3xx/spear3xx.c @@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = { struct pl08x_platform_data pl080_plat_data = { 
.memcpy_channel = { .bus_id = "memcpy", - .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl_memcpy = + (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c index 9af67d003c62..5a5a52db252b 100644 --- a/arch/arm/mach-spear6xx/spear6xx.c +++ b/arch/arm/mach-spear6xx/spear6xx.c @@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = { .min_signal = 0, .max_signal = 0, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp2_rx", .min_signal = 6, .max_signal = 6, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 7, .max_signal = 7, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, 
}, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ext0_rx", .min_signal = 0, .max_signal = 0, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext0_tx", .min_signal = 1, .max_signal = 1, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_rx", .min_signal = 2, .max_signal = 2, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_tx", .min_signal = 3, .max_signal = 3, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_rx", .min_signal = 4, .max_signal = 4, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_tx", .min_signal = 5, .max_signal = 5, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_rx", .min_signal = 6, .max_signal = 6, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_tx", .min_signal = 7, .max_signal = 7, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_rx", .min_signal = 8, .max_signal = 8, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_tx", .min_signal = 9, .max_signal = 9, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_rx", .min_signal = 10, .max_signal = 10, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_tx", .min_signal = 11, .max_signal = 11, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_rx", .min_signal = 12, .max_signal = 12, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_tx", .min_signal = 13, .max_signal = 13, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_rx", .min_signal = 14, .max_signal = 14, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_tx", .min_signal = 15, .max_signal = 15, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, }; @@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl_memcpy = + (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c index 8fd387bf31f0..b7344beec102 100644 --- a/arch/arm/mach-tegra/board-harmony-power.c +++ b/arch/arm/mach-tegra/board-harmony-power.c @@ -51,7 +51,7 @@ static struct regulator_init_data ldo0_data = { .consumer_supplies = tps658621_ldo0_supply, }; -#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv) \ +#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\ static struct regulator_init_data _id##_data = { \ .supply_regulator = 
_supply, \ .constraints = { \ @@ -63,21 +63,22 @@ static struct regulator_init_data ldo0_data = { .valid_ops_mask = (REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_STATUS | \ REGULATOR_CHANGE_VOLTAGE), \ + .always_on = _on, \ }, \ } -HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550); -HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475); -HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300); -HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300); +HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550, 1); +HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1); +HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0); +HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1); +HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1); #define TPS_REG(_id, _data) \ { \ @@ -119,9 +120,10 @@ static struct i2c_board_info __initdata harmony_regulators[] = { int __init harmony_regulator_init(void) { + regulator_register_always_on(0, "vdd_sys", + NULL, 0, 5000000); + if (machine_is_harmony()) { - regulator_register_always_on(0, "vdd_sys", - NULL, 0, 5000000); i2c_register_board_info(3, harmony_regulators, 1); } else { /* Harmony, booted using device tree */ struct device_node *np; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c2cdf6500f75..4e7d1182e8a3 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -358,7 +358,7 @@ void __init dma_contiguous_remap(void) if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) - return; + continue; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); @@ -423,7 +423,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) unsigned int pageno; unsigned long flags; void *ptr = NULL; - size_t align; + unsigned long align_mask; if (!pool->vaddr) { WARN(1, "coherent pool not initialised!\n"); @@ -435,11 +435,11 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) * small, so align them to their order in pages, minimum is a page * size. This helps reduce fragmentation of the DMA space. 
*/ - align = PAGE_SIZE << get_order(size); + align_mask = (1 << get_order(size)) - 1; spin_lock_irqsave(&pool->lock, flags); pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, - 0, count, (1 << align) - 1); + 0, count, align_mask); if (pageno < pool->nr_pages) { bitmap_set(pool->bitmap, pageno, count); ptr = pool->vaddr + PAGE_SIZE * pageno; @@ -648,12 +648,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, if (arch_is_coherent() || nommu()) { __dma_free_buffer(page, size); + } else if (__free_from_pool(cpu_addr, size)) { + return; } else if (!IS_ENABLED(CONFIG_CMA)) { __dma_free_remap(cpu_addr, size); __dma_free_buffer(page, size); } else { - if (__free_from_pool(cpu_addr, size)) - return; /* * Non-atomic allocations cannot be freed with IRQs disabled */ diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 77458548e031..40ca11ed6e5f 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval) struct page *page; struct address_space *mapping; - if (!pte_present_user(pteval)) - return; if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) /* only flush non-aliasing VIPT caches for exec mappings */ return; diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 845f461f8ec1..ea94765acf9a 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -39,10 +39,18 @@ ENTRY(v7wbi_flush_user_tlb_range) mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT asid r3, r3 @ mask ASID +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(W(mov) r3, #0 ) + ALT_UP(W(nop) ) +#endif orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ @@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c index c2193178210b..3ed1adbc09f8 100644 --- a/arch/arm/plat-mxc/tzic.c +++ b/arch/arm/plat-mxc/tzic.c @@ -23,6 +23,7 @@ #include <mach/hardware.h> #include <mach/common.h> +#include <mach/irqs.h> #include "irq-common.h" diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h index 5493bd95da5e..eb3e4d555343 100644 --- a/arch/arm/plat-omap/include/plat/mmc.h +++ b/arch/arm/plat-omap/include/plat/mmc.h @@ -81,8 +81,6 @@ struct omap_mmc_platform_data { /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); - u64 dma_mask; - /* Integrating attributes from the omap_hwmod layer */ u8 controller_flags; diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index c1793786aea9..d245a87dc014 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -47,6 +47,7 @@ void __init orion_clkdev_init(struct clk *tclk) orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk); orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk); orion_clkdev_add(NULL, "orion_wdt", tclk); + orion_clkdev_add(NULL, 
MV64XXX_I2C_CTLR_NAME ".0", tclk); } /* Fill in the resources structure and link it into the platform diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index af95af257301..dfda74fae6f2 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -8,15 +8,22 @@ * warranty of any kind, whether express or implied. */ +#define DEBUG + #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/leds.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <plat/gpio.h> /* * GPIO unit register offsets. @@ -38,6 +45,7 @@ struct orion_gpio_chip { unsigned long valid_output; int mask_offset; int secondary_irq_base; + struct irq_domain *domain; }; static void __iomem *GPIO_OUT(struct orion_gpio_chip *ochip) @@ -222,10 +230,10 @@ static int orion_gpio_to_irq(struct gpio_chip *chip, unsigned pin) struct orion_gpio_chip *ochip = container_of(chip, struct orion_gpio_chip, chip); - return ochip->secondary_irq_base + pin; + return irq_create_mapping(ochip->domain, + ochip->secondary_irq_base + pin); } - /* * Orion-specific GPIO API extensions. */ @@ -353,12 +361,10 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) int pin; u32 u; - pin = d->irq - gc->irq_base; + pin = d->hwirq - ochip->secondary_irq_base; u = readl(GPIO_IO_CONF(ochip)) & (1 << pin); if (!u) { - printk(KERN_ERR "orion gpio_irq_set_type failed " - "(irq %d, pin %d).\n", d->irq, pin); return -EINVAL; } @@ -397,17 +403,53 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) u &= ~(1 << pin); /* rising */ writel(u, GPIO_IN_POL(ochip)); } - return 0; } -void __init orion_gpio_init(int gpio_base, int ngpio, - u32 base, int mask_offset, int secondary_irq_base) +static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +{ + struct orion_gpio_chip *ochip = irq_get_handler_data(irq); + u32 cause, type; + int i; + + if (ochip == NULL) + return; + + cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip)); + cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip)); + + for (i = 0; i < ochip->chip.ngpio; i++) { + int irq; + + irq = ochip->secondary_irq_base + i; + + if (!(cause & (1 << i))) + continue; + + type = irqd_get_trigger_type(irq_get_irq_data(irq)); + if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { + /* Swap polarity (race with GPIO line) */ + u32 polarity; + + polarity = readl(GPIO_IN_POL(ochip)); + polarity ^= 1 << i; + writel(polarity, GPIO_IN_POL(ochip)); + } + generic_handle_irq(irq); + } +} + +void __init orion_gpio_init(struct device_node *np, + int gpio_base, int ngpio, + void __iomem *base, int mask_offset, + int secondary_irq_base, + int irqs[4]) { struct orion_gpio_chip *ochip; struct irq_chip_generic *gc; struct irq_chip_type *ct; char gc_label[16]; + int i; if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips)) return; @@ -426,6 +468,10 @@ void __init orion_gpio_init(int gpio_base, int ngpio, ochip->chip.base = gpio_base; ochip->chip.ngpio = ngpio; ochip->chip.can_sleep = 0; +#ifdef CONFIG_OF + ochip->chip.of_node = np; +#endif + spin_lock_init(&ochip->lock); ochip->base = (void __iomem *)base; ochip->valid_input = 0; @@ -435,8 +481,6 @@ void __init orion_gpio_init(int gpio_base, int ngpio, gpiochip_add(&ochip->chip); - orion_gpio_chip_count++; - /* * Mask and clear GPIO interrupts. 
*/ @@ -444,16 +488,28 @@ void __init orion_gpio_init(int gpio_base, int ngpio, writel(0, GPIO_EDGE_MASK(ochip)); writel(0, GPIO_LEVEL_MASK(ochip)); - gc = irq_alloc_generic_chip("orion_gpio_irq", 2, secondary_irq_base, + /* Setup the interrupt handlers. Each chip can have up to 4 + * interrupt handlers, with each handler dealing with 8 GPIO + * pins. */ + + for (i = 0; i < 4; i++) { + if (irqs[i]) { + irq_set_handler_data(irqs[i], ochip); + irq_set_chained_handler(irqs[i], gpio_irq_handler); + } + } + + gc = irq_alloc_generic_chip("orion_gpio_irq", 2, + secondary_irq_base, ochip->base, handle_level_irq); gc->private = ochip; - ct = gc->chip_types; ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF; ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_irq_set_type; + ct->chip.name = ochip->chip.label; ct++; ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; @@ -464,41 +520,69 @@ void __init orion_gpio_init(int gpio_base, int ngpio, ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_irq_set_type; ct->handler = handle_edge_irq; + ct->chip.name = ochip->chip.label; irq_setup_generic_chip(gc, IRQ_MSK(ngpio), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); -} -void orion_gpio_irq_handler(int pinoff) -{ - struct orion_gpio_chip *ochip; - u32 cause, type; - int i; - - ochip = orion_gpio_chip_find(pinoff); - if (ochip == NULL) - return; - - cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip)); - cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip)); - - for (i = 0; i < ochip->chip.ngpio; i++) { - int irq; + /* Setup irq domain on top of the generic chip. */ + ochip->domain = irq_domain_add_legacy(np, + ochip->chip.ngpio, + ochip->secondary_irq_base, + ochip->secondary_irq_base, + &irq_domain_simple_ops, + ochip); + if (!ochip->domain) + panic("%s: couldn't allocate irq domain (DT).\n", + ochip->chip.label); - irq = ochip->secondary_irq_base + i; + orion_gpio_chip_count++; +} - if (!(cause & (1 << i))) - continue; +#ifdef CONFIG_OF +static void __init orion_gpio_of_init_one(struct device_node *np, + int irq_gpio_base) +{ + int ngpio, gpio_base, mask_offset; + void __iomem *base; + int ret, i; + int irqs[4]; + int secondary_irq_base; + + ret = of_property_read_u32(np, "ngpio", &ngpio); + if (ret) + goto out; + ret = of_property_read_u32(np, "mask-offset", &mask_offset); + if (ret == -EINVAL) + mask_offset = 0; + else + goto out; + base = of_iomap(np, 0); + if (!base) + goto out; + + secondary_irq_base = irq_gpio_base + (32 * orion_gpio_chip_count); + gpio_base = 32 * orion_gpio_chip_count; + + /* Get the interrupt numbers. Each chip can have up to 4 + * interrupt handlers, with each handler dealing with 8 GPIO + * pins. 
*/ + + for (i = 0; i < 4; i++) + irqs[i] = irq_of_parse_and_map(np, i); + + orion_gpio_init(np, gpio_base, ngpio, base, mask_offset, + secondary_irq_base, irqs); + return; +out: + pr_err("%s: %s: missing mandatory property\n", __func__, np->name); +} - type = irqd_get_trigger_type(irq_get_irq_data(irq)); - if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { - /* Swap polarity (race with GPIO line) */ - u32 polarity; +void __init orion_gpio_of_init(int irq_gpio_base) +{ + struct device_node *np; - polarity = readl(GPIO_IN_POL(ochip)); - polarity ^= 1 << i; - writel(polarity, GPIO_IN_POL(ochip)); - } - generic_handle_irq(irq); - } + for_each_compatible_node(np, NULL, "marvell,orion-gpio") + orion_gpio_of_init_one(np, irq_gpio_base); } +#endif diff --git a/arch/arm/plat-orion/include/plat/gpio.h b/arch/arm/plat-orion/include/plat/gpio.h index bec0c98ce41f..81c6fc8a7b28 100644 --- a/arch/arm/plat-orion/include/plat/gpio.h +++ b/arch/arm/plat-orion/include/plat/gpio.h @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/types.h> - +#include <linux/irqdomain.h> /* * Orion-specific GPIO API extensions. */ @@ -27,13 +27,11 @@ int orion_gpio_led_blink_set(unsigned gpio, int state, void orion_gpio_set_valid(unsigned pin, int mode); /* Initialize gpiolib. */ -void __init orion_gpio_init(int gpio_base, int ngpio, - u32 base, int mask_offset, int secondary_irq_base); - -/* - * GPIO interrupt handling. - */ -void orion_gpio_irq_handler(int irqoff); - +void __init orion_gpio_init(struct device_node *np, + int gpio_base, int ngpio, + void __iomem *base, int mask_offset, + int secondary_irq_base, + int irq[4]); +void __init orion_gpio_of_init(int irq_gpio_base); #endif diff --git a/arch/arm/plat-orion/include/plat/irq.h b/arch/arm/plat-orion/include/plat/irq.h index f05eeab94968..50547e417936 100644 --- a/arch/arm/plat-orion/include/plat/irq.h +++ b/arch/arm/plat-orion/include/plat/irq.h @@ -12,6 +12,5 @@ #define __PLAT_IRQ_H void orion_irq_init(unsigned int irq_start, void __iomem *maskaddr); - - +void __init orion_dt_init_irq(void); #endif diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c index 2d5b9c1ef389..d751964def4c 100644 --- a/arch/arm/plat-orion/irq.c +++ b/arch/arm/plat-orion/irq.c @@ -11,8 +11,12 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <plat/irq.h> +#include <plat/gpio.h> void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) { @@ -32,3 +36,39 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); } + +#ifdef CONFIG_OF +static int __init orion_add_irq_domain(struct device_node *np, + struct device_node *interrupt_parent) +{ + int i = 0, irq_gpio; + void __iomem *base; + + do { + base = of_iomap(np, i); + if (base) { + orion_irq_init(i * 32, base); + i++; + } + } while (base); + + irq_domain_add_legacy(np, i * 32, 0, 0, + &irq_domain_simple_ops, NULL); + + irq_gpio = i * 32; + orion_gpio_of_init(irq_gpio); + + return 0; +} + +static const struct of_device_id orion_irq_match[] = { + { .compatible = "marvell,orion-intc", + .data = orion_add_irq_domain, }, + {}, +}; + +void __init orion_dt_init_irq(void) +{ + of_irq_init(orion_irq_match); +} +#endif diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index 7aca31c1df1f..9c3b90c3538e 100644 --- 
a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -403,7 +403,8 @@ config S5P_DEV_USB_EHCI config S3C24XX_PWM bool "PWM device support" - select HAVE_PWM + select PWM + select PWM_SAMSUNG help Support for exporting the PWM timer blocks via the pwm device system diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h index 2bc6b54460a8..eb6590ded40d 100644 --- a/arch/arm/plat-spear/include/plat/pl080.h +++ b/arch/arm/plat-spear/include/plat/pl080.h @@ -14,8 +14,8 @@ #ifndef __PLAT_PL080_H #define __PLAT_PL080_H -struct pl08x_dma_chan; -int pl080_get_signal(struct pl08x_dma_chan *ch); -void pl080_put_signal(struct pl08x_dma_chan *ch); +struct pl08x_channel_data; +int pl080_get_signal(const struct pl08x_channel_data *cd); +void pl080_put_signal(const struct pl08x_channel_data *cd, int signal); #endif /* __PLAT_PL080_H */ diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c index 12cf27f935f9..cfa1199d0f4a 100644 --- a/arch/arm/plat-spear/pl080.c +++ b/arch/arm/plat-spear/pl080.c @@ -27,9 +27,8 @@ struct { unsigned char val; } signals[16] = {{0, 0}, }; -int pl080_get_signal(struct pl08x_dma_chan *ch) +int pl080_get_signal(const struct pl08x_channel_data *cd) { - const struct pl08x_channel_data *cd = ch->cd; unsigned int signal = cd->min_signal, val; unsigned long flags; @@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch) return signal; } -void pl080_put_signal(struct pl08x_dma_chan *ch) +void pl080_put_signal(const struct pl08x_channel_data *cd, int signal) { - const struct pl08x_channel_data *cd = ch->cd; unsigned long flags; spin_lock_irqsave(&lock, flags); /* if signal is not used */ - if (!signals[cd->min_signal].busy) + if (!signals[signal].busy) BUG(); - signals[cd->min_signal].busy--; + signals[signal].busy--; spin_unlock_irqrestore(&lock, flags); } diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index 4fa9903b83cf..cc926c985981 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -7,18 +7,20 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * Basic entry code, called from the kernel's undefined instruction trap. - * r0 = faulted instruction - * r5 = faulted PC+4 - * r9 = successful return - * r10 = thread_info structure - * lr = failure return */ #include <asm/thread_info.h> #include <asm/vfpmacros.h> #include "../kernel/entry-header.S" +@ VFP entry point. +@ +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address +@ r10 = this threads thread_info structure +@ lr = unrecognised instruction return address +@ IRQs disabled. +@ ENTRY(do_vfp) #ifdef CONFIG_PREEMPT ldr r4, [r10, #TI_PREEMPT] @ get preempt count diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index d50f0e486cf2..ea0349f63586 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -62,13 +62,13 @@ @ VFP hardware support entry point. @ -@ r0 = faulted instruction -@ r2 = faulted PC+4 -@ r9 = successful return +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address @ r10 = vfp_state union @ r11 = CPU number -@ lr = failure return - +@ lr = unrecognised instruction return address +@ IRQs enabled. 
ENTRY(vfp_support_entry) DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 @@ -162,9 +162,12 @@ vfp_hw_state_valid: @ exception before retrying branch @ out before setting an FPEXC that @ stops us reading stuff - VFPFMXR FPEXC, r1 @ restore FPEXC last - sub r2, r2, #4 - str r2, [sp, #S_PC] @ retry the instruction + VFPFMXR FPEXC, r1 @ Restore FPEXC last + sub r2, r2, #4 @ Retry current instruction - if Thumb + str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, + @ else it's one 32-bit instruction, so + @ always subtract 4 from the following + @ instruction address. #ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 586961929e96..c834b32af275 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -457,10 +457,16 @@ static int vfp_pm_suspend(void) /* disable, just in case */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + } else if (vfp_current_hw_state[ti->cpu]) { +#ifndef CONFIG_SMP + fmxr(FPEXC, fpexc | FPEXC_EN); + vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc); + fmxr(FPEXC, fpexc); +#endif } /* clear any information we had about last context state */ - memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); + vfp_current_hw_state[ti->cpu] = NULL; return 0; } @@ -713,8 +719,10 @@ static int __init vfp_init(void) if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) elf_hwcap |= HWCAP_NEON; #endif +#ifdef CONFIG_VFPv3 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) elf_hwcap |= HWCAP_VFPv4; +#endif } } return 0; diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index ada8f0fc71e4..fb96e607adcf 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on); #ifdef CONFIG_MTD_UCLINUX extern struct map_info uclinux_ram_map; unsigned long memory_mtd_end, memory_mtd_start, mtd_size; -unsigned long _ebss; EXPORT_SYMBOL(memory_mtd_end); EXPORT_SYMBOL(memory_mtd_start); EXPORT_SYMBOL(mtd_size); diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 052f81a76239..983c859e40b7 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -6,6 +6,7 @@ config C6X def_bool y select CLKDEV_LOOKUP + select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h index 6d521d96d941..09c5a0f5f4d1 100644 --- a/arch/c6x/include/asm/cache.h +++ b/arch/c6x/include/asm/cache.h @@ -1,7 +1,7 @@ /* * Port on Texas Instruments TMS320C6x architecture * - * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated + * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) * * This program is free software; you can redistribute it and/or modify @@ -16,9 +16,14 @@ /* * Cache line size */ -#define L1D_CACHE_BYTES 64 -#define L1P_CACHE_BYTES 32 -#define L2_CACHE_BYTES 128 +#define L1D_CACHE_SHIFT 6 +#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT) + +#define L1P_CACHE_SHIFT 5 +#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT) + +#define L2_CACHE_SHIFT 7 +#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) /* * L2 used as cache @@ -29,7 +34,8 @@ * For practical reasons the L1_CACHE_BYTES defines should not be smaller than * the L2 line size */ -#define L1_CACHE_BYTES L2_CACHE_BYTES +#define L1_CACHE_SHIFT L2_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L2_CACHE_ALIGN_LOW(x) \ (((x) & ~(L2_CACHE_BYTES - 
1))) diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 954d81e2e837..7913695b2fcb 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y -CONFIG_MISC_DEVICES=y CONFIG_INTEL_IOMMU=y diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 91c41ecfa6d9..f8e913365423 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_CRYPTO_MD5=y -CONFIG_MISC_DEVICES=y diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 6f38b6120d96..440578850ae5 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) srat_num_cpus++; } -void __init +int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { unsigned long paddr, size; @@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) /* Ignore disabled entries */ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) - return; + return -1; /* record this node in proximity bitmap */ pxm_bit_set(pxm); @@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) p->size = size; p->nid = pxm; num_node_memblks++; + return 0; } void __init acpi_numa_arch_fixup(void) diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 0b0f8b8c4a26..b22df9410dce 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -5,6 +5,7 @@ config M68K select HAVE_AOUT if MMU select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW + select GENERIC_ATOMIC64 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER if MMU @@ -54,18 +55,6 @@ config ZONE_DMA bool default y -config CPU_HAS_NO_BITFIELDS - bool - -config CPU_HAS_NO_MULDIV64 - bool - -config CPU_HAS_ADDRESS_SPACES - bool - -config FPU - bool - config HZ int default 1000 if CLEOPATRA diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index 43a9f8f1b8eb..c4eb79edecec 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -28,6 +28,7 @@ config COLDFIRE select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 select GENERIC_CSUM + select HAVE_CLK endchoice @@ -37,6 +38,7 @@ config M68000 bool select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 + select CPU_HAS_NO_UNALIGNED select GENERIC_CSUM help The Freescale (was Motorola) 68000 CPU is the first generation of @@ -48,6 +50,7 @@ config M68000 config MCPU32 bool select CPU_HAS_NO_BITFIELDS + select CPU_HAS_NO_UNALIGNED help The Freescale (was then Motorola) CPU32 is a CPU core that is based on the 68020 processor. 
For the most part it is used in @@ -56,7 +59,6 @@ config MCPU32 config M68020 bool "68020 support" depends on MMU - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68020 @@ -67,7 +69,6 @@ config M68020 config M68030 bool "68030 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68030 @@ -77,7 +78,6 @@ config M68030 config M68040 bool "68040 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68LC040 @@ -88,7 +88,6 @@ config M68040 config M68060 bool "68060 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68060 @@ -376,6 +375,18 @@ config NODES_SHIFT default "3" depends on !SINGLE_MEMORY_CHUNK +config CPU_HAS_NO_BITFIELDS + bool + +config CPU_HAS_NO_MULDIV64 + bool + +config CPU_HAS_NO_UNALIGNED + bool + +config CPU_HAS_ADDRESS_SPACES + bool + config FPU bool diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 0a30406b9442..f5565d6eeb8e 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id) timer_handler(irq, dev_id); - x=*(volatile unsigned char *)(timer+3); - x=*(volatile unsigned char *)(timer+5); + x = *(volatile unsigned char *)(apollo_timer + 3); + x = *(volatile unsigned char *)(apollo_timer + 5); return IRQ_HANDLED; } @@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id) void dn_sched_init(irq_handler_t timer_routine) { /* program timer 1 */ - *(volatile unsigned char *)(timer+3)=0x01; - *(volatile unsigned char *)(timer+1)=0x40; - *(volatile unsigned char *)(timer+5)=0x09; - *(volatile unsigned char *)(timer+7)=0xc4; + *(volatile unsigned char *)(apollo_timer + 3) = 0x01; + *(volatile unsigned char *)(apollo_timer + 1) = 0x40; + *(volatile unsigned char *)(apollo_timer + 5) = 0x09; + *(volatile unsigned char *)(apollo_timer + 7) = 0xc4; /* enable IRQ of PIC B */ *(volatile unsigned char *)(pica+1)&=(~8); #if 0 - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); + printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); + printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); #endif if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine)) diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index eafa2539a8ee..a74e5d95c384 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,4 +1,29 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += device.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += futex.h +generic-y += ioctl.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += local64.h +generic-y += local.h +generic-y += mman.h +generic-y += mutex.h +generic-y += percpu.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += siginfo.h +generic-y += statfs.h +generic-y += topology.h +generic-y += types.h generic-y += word-at-a-time.h +generic-y += xor.h diff --git 
a/arch/m68k/include/asm/MC68332.h b/arch/m68k/include/asm/MC68332.h deleted file mode 100644 index 6bb8f02685a2..000000000000 --- a/arch/m68k/include/asm/MC68332.h +++ /dev/null @@ -1,152 +0,0 @@ - -/* include/asm-m68knommu/MC68332.h: '332 control registers - * - * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>, - * - */ - -#ifndef _MC68332_H_ -#define _MC68332_H_ - -#define BYTE_REF(addr) (*((volatile unsigned char*)addr)) -#define WORD_REF(addr) (*((volatile unsigned short*)addr)) - -#define PORTE_ADDR 0xfffa11 -#define PORTE BYTE_REF(PORTE_ADDR) -#define DDRE_ADDR 0xfffa15 -#define DDRE BYTE_REF(DDRE_ADDR) -#define PEPAR_ADDR 0xfffa17 -#define PEPAR BYTE_REF(PEPAR_ADDR) - -#define PORTF_ADDR 0xfffa19 -#define PORTF BYTE_REF(PORTF_ADDR) -#define DDRF_ADDR 0xfffa1d -#define DDRF BYTE_REF(DDRF_ADDR) -#define PFPAR_ADDR 0xfffa1f -#define PFPAR BYTE_REF(PFPAR_ADDR) - -#define PORTQS_ADDR 0xfffc15 -#define PORTQS BYTE_REF(PORTQS_ADDR) -#define DDRQS_ADDR 0xfffc17 -#define DDRQS BYTE_REF(DDRQS_ADDR) -#define PQSPAR_ADDR 0xfffc16 -#define PQSPAR BYTE_REF(PQSPAR_ADDR) - -#define CSPAR0_ADDR 0xFFFA44 -#define CSPAR0 WORD_REF(CSPAR0_ADDR) -#define CSPAR1_ADDR 0xFFFA46 -#define CSPAR1 WORD_REF(CSPAR1_ADDR) -#define CSARBT_ADDR 0xFFFA48 -#define CSARBT WORD_REF(CSARBT_ADDR) -#define CSOPBT_ADDR 0xFFFA4A -#define CSOPBT WORD_REF(CSOPBT_ADDR) -#define CSBAR0_ADDR 0xFFFA4C -#define CSBAR0 WORD_REF(CSBAR0_ADDR) -#define CSOR0_ADDR 0xFFFA4E -#define CSOR0 WORD_REF(CSOR0_ADDR) -#define CSBAR1_ADDR 0xFFFA50 -#define CSBAR1 WORD_REF(CSBAR1_ADDR) -#define CSOR1_ADDR 0xFFFA52 -#define CSOR1 WORD_REF(CSOR1_ADDR) -#define CSBAR2_ADDR 0xFFFA54 -#define CSBAR2 WORD_REF(CSBAR2_ADDR) -#define CSOR2_ADDR 0xFFFA56 -#define CSOR2 WORD_REF(CSOR2_ADDR) -#define CSBAR3_ADDR 0xFFFA58 -#define CSBAR3 WORD_REF(CSBAR3_ADDR) -#define CSOR3_ADDR 0xFFFA5A -#define CSOR3 WORD_REF(CSOR3_ADDR) -#define CSBAR4_ADDR 0xFFFA5C -#define CSBAR4 WORD_REF(CSBAR4_ADDR) -#define CSOR4_ADDR 0xFFFA5E -#define CSOR4 WORD_REF(CSOR4_ADDR) -#define CSBAR5_ADDR 0xFFFA60 -#define CSBAR5 WORD_REF(CSBAR5_ADDR) -#define CSOR5_ADDR 0xFFFA62 -#define CSOR5 WORD_REF(CSOR5_ADDR) -#define CSBAR6_ADDR 0xFFFA64 -#define CSBAR6 WORD_REF(CSBAR6_ADDR) -#define CSOR6_ADDR 0xFFFA66 -#define CSOR6 WORD_REF(CSOR6_ADDR) -#define CSBAR7_ADDR 0xFFFA68 -#define CSBAR7 WORD_REF(CSBAR7_ADDR) -#define CSOR7_ADDR 0xFFFA6A -#define CSOR7 WORD_REF(CSOR7_ADDR) -#define CSBAR8_ADDR 0xFFFA6C -#define CSBAR8 WORD_REF(CSBAR8_ADDR) -#define CSOR8_ADDR 0xFFFA6E -#define CSOR8 WORD_REF(CSOR8_ADDR) -#define CSBAR9_ADDR 0xFFFA70 -#define CSBAR9 WORD_REF(CSBAR9_ADDR) -#define CSOR9_ADDR 0xFFFA72 -#define CSOR9 WORD_REF(CSOR9_ADDR) -#define CSBAR10_ADDR 0xFFFA74 -#define CSBAR10 WORD_REF(CSBAR10_ADDR) -#define CSOR10_ADDR 0xFFFA76 -#define CSOR10 WORD_REF(CSOR10_ADDR) - -#define CSOR_MODE_ASYNC 0x0000 -#define CSOR_MODE_SYNC 0x8000 -#define CSOR_MODE_MASK 0x8000 -#define CSOR_BYTE_DISABLE 0x0000 -#define CSOR_BYTE_UPPER 0x4000 -#define CSOR_BYTE_LOWER 0x2000 -#define CSOR_BYTE_BOTH 0x6000 -#define CSOR_BYTE_MASK 0x6000 -#define CSOR_RW_RSVD 0x0000 -#define CSOR_RW_READ 0x0800 -#define CSOR_RW_WRITE 0x1000 -#define CSOR_RW_BOTH 0x1800 -#define CSOR_RW_MASK 0x1800 -#define CSOR_STROBE_DS 0x0400 -#define CSOR_STROBE_AS 0x0000 -#define CSOR_STROBE_MASK 0x0400 -#define CSOR_DSACK_WAIT(x) (wait << 6) -#define CSOR_DSACK_FTERM (14 << 6) -#define CSOR_DSACK_EXTERNAL (15 << 6) -#define CSOR_DSACK_MASK 0x03c0 -#define CSOR_SPACE_CPU 0x0000 -#define CSOR_SPACE_USER 0x0010 -#define 
CSOR_SPACE_SU 0x0020 -#define CSOR_SPACE_BOTH 0x0030 -#define CSOR_SPACE_MASK 0x0030 -#define CSOR_IPL_ALL 0x0000 -#define CSOR_IPL_PRIORITY(x) (x << 1) -#define CSOR_IPL_MASK 0x000e -#define CSOR_AVEC_ON 0x0001 -#define CSOR_AVEC_OFF 0x0000 -#define CSOR_AVEC_MASK 0x0001 - -#define CSBAR_ADDR(x) ((addr >> 11) << 3) -#define CSBAR_ADDR_MASK 0xfff8 -#define CSBAR_BLKSIZE_2K 0x0000 -#define CSBAR_BLKSIZE_8K 0x0001 -#define CSBAR_BLKSIZE_16K 0x0002 -#define CSBAR_BLKSIZE_64K 0x0003 -#define CSBAR_BLKSIZE_128K 0x0004 -#define CSBAR_BLKSIZE_256K 0x0005 -#define CSBAR_BLKSIZE_512K 0x0006 -#define CSBAR_BLKSIZE_1M 0x0007 -#define CSBAR_BLKSIZE_MASK 0x0007 - -#define CSPAR_DISC 0 -#define CSPAR_ALT 1 -#define CSPAR_CS8 2 -#define CSPAR_CS16 3 -#define CSPAR_MASK 3 - -#define CSPAR0_CSBOOT(x) (x << 0) -#define CSPAR0_CS0(x) (x << 2) -#define CSPAR0_CS1(x) (x << 4) -#define CSPAR0_CS2(x) (x << 6) -#define CSPAR0_CS3(x) (x << 8) -#define CSPAR0_CS4(x) (x << 10) -#define CSPAR0_CS5(x) (x << 12) - -#define CSPAR1_CS6(x) (x << 0) -#define CSPAR1_CS7(x) (x << 2) -#define CSPAR1_CS8(x) (x << 4) -#define CSPAR1_CS9(x) (x << 6) -#define CSPAR1_CS10(x) (x << 8) - -#endif diff --git a/arch/m68k/include/asm/apollodma.h b/arch/m68k/include/asm/apollodma.h deleted file mode 100644 index 954adc851adb..000000000000 --- a/arch/m68k/include/asm/apollodma.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_APOLLO_DMA_H -#define _ASM_APOLLO_DMA_H - -#include <asm/apollohw.h> /* need byte IO */ -#include <linux/spinlock.h> /* And spinlocks */ -#include <linux/delay.h> - - -#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val)) -#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE))) - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... \ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... 
A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. - * - */ - -#define MAX_DMA_CHANNELS 8 - -/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */ -#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */ -#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */ -#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */ -#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */ -#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */ -#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */ -#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */ - -#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */ -#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */ -#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */ -#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */ -#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */ -#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */ -#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */ -#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */ - -#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */ -#define DMA_ADDR_1 (IO_DMA1_BASE+0x02) -#define DMA_ADDR_2 (IO_DMA1_BASE+0x04) -#define DMA_ADDR_3 (IO_DMA1_BASE+0x06) -#define DMA_ADDR_4 (IO_DMA2_BASE+0x00) -#define DMA_ADDR_5 (IO_DMA2_BASE+0x04) -#define DMA_ADDR_6 (IO_DMA2_BASE+0x08) -#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C) - -#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */ -#define DMA_CNT_1 (IO_DMA1_BASE+0x03) -#define DMA_CNT_2 (IO_DMA1_BASE+0x05) -#define DMA_CNT_3 (IO_DMA1_BASE+0x07) -#define DMA_CNT_4 (IO_DMA2_BASE+0x02) -#define DMA_CNT_5 (IO_DMA2_BASE+0x06) -#define DMA_CNT_6 (IO_DMA2_BASE+0x0A) -#define DMA_CNT_7 (IO_DMA2_BASE+0x0E) - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - -#define DMA_8BIT 0 -#define DMA_16BIT 1 -#define DMA_BUSMASTER 2 - -extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void 
release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. :-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while holding the DMA lock ! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } -} - - -/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. - */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? 
count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -/* These are in arch/m68k/apollo/dma.c: */ -extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type); -extern void dma_unmap_page(unsigned short dma_addr); - -#endif /* _ASM_APOLLO_DMA_H */ diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h index a1373b9aa281..635ef4f89010 100644 --- a/arch/m68k/include/asm/apollohw.h +++ b/arch/m68k/include/asm/apollohw.h @@ -98,7 +98,7 @@ extern u_long timer_physaddr; #define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr)) #define pica (IO_BASE + pica_physaddr) #define picb (IO_BASE + picb_physaddr) -#define timer (IO_BASE + timer_physaddr) +#define apollo_timer (IO_BASE + timer_physaddr) #define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000)) #define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE) diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/arch/m68k/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h deleted file mode 100644 index c79c5e892305..000000000000 --- a/arch/m68k/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __M68K_CPUTIME_H -#define __M68K_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __M68K_CPUTIME_H */ diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h index 9c09becfd4c9..12d8fe4f1d30 100644 --- a/arch/m68k/include/asm/delay.h +++ b/arch/m68k/include/asm/delay.h @@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops) extern void __bad_udelay(void); -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 /* * The simpler m68k and ColdFire processors do not have a 32*32->64 * multiply instruction. So we need to handle them a little differently. 
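The m68k cleanups above stop keying the 64-bit multiply helpers on particular CPU models (CONFIG_M68000 || CONFIG_COLDFIRE) and instead test the new capability symbol CONFIG_CPU_HAS_NO_MULDIV64, which the affected cores select in Kconfig.cpu. The code guarded by it in delay.h and muldi3.c composes a 32x32->64 multiply out of narrower partial products, since those cores lack a multiply instruction that yields a 64-bit result. A minimal, self-contained sketch of that technique follows; it is an illustration in plain C, not the kernel's muldi3.c, and mul32x32_64() is a name invented for the example.

/*
 * Illustration only -- not the kernel's muldi3.c. Compose a 32x32->64
 * multiply from four 16x16->32 partial products, the approach needed on
 * cores described by CONFIG_CPU_HAS_NO_MULDIV64.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mul32x32_64(uint32_t a, uint32_t b)
{
	uint32_t a_lo = a & 0xffff, a_hi = a >> 16;
	uint32_t b_lo = b & 0xffff, b_hi = b >> 16;

	uint32_t lo_lo = a_lo * b_lo;		/* contributes to bits  0..31 */
	uint32_t lo_hi = a_lo * b_hi;		/* contributes to bits 16..47 */
	uint32_t hi_lo = a_hi * b_lo;		/* contributes to bits 16..47 */
	uint32_t hi_hi = a_hi * b_hi;		/* contributes to bits 32..63 */

	/* Sum the two cross terms plus the carry out of the low word. */
	uint64_t mid = (uint64_t)lo_hi + hi_lo + (lo_lo >> 16);

	return ((uint64_t)hi_hi << 32) + (mid << 16) + (lo_lo & 0xffff);
}

int main(void)
{
	uint32_t a = 0x89abcdef, b = 0x12345678;

	printf("%llx\n", (unsigned long long)mul32x32_64(a, b));
	printf("%llx\n", (unsigned long long)((uint64_t)a * b));	/* same value */
	return 0;
}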
diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h deleted file mode 100644 index d8f9872b0e2d..000000000000 --- a/arch/m68k/include/asm/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include <asm-generic/device.h> - diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/m68k/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h deleted file mode 100644 index 0d4e188d6ef6..000000000000 --- a/arch/m68k/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_ERRNO_H -#define _M68K_ERRNO_H - -#include <asm-generic/errno.h> - -#endif /* _M68K_ERRNO_H */ diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h deleted file mode 100644 index 6a332a9f099c..000000000000 --- a/arch/m68k/include/asm/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include <asm-generic/futex.h> - -#endif diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/arch/m68k/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/arch/m68k/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ipcbuf.h> diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/arch/m68k/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h deleted file mode 100644 index 6ece1b037665..000000000000 --- a/arch/m68k/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kdebug.h> diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h deleted file mode 100644 index 3413cc1390ec..000000000000 --- a/arch/m68k/include/asm/kmap_types.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_KMAP_TYPES_H -#define __ASM_M68K_KMAP_TYPES_H - -#include <asm-generic/kmap_types.h> - -#endif /* __ASM_M68K_KMAP_TYPES_H */ diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/arch/m68k/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kvm_para.h> diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h deleted file mode 100644 index 6c259263e1f0..000000000000 --- a/arch/m68k/include/asm/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_LOCAL_H -#define _ASM_M68K_LOCAL_H - -#include <asm-generic/local.h> - -#endif /* _ASM_M68K_LOCAL_H */ diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/arch/m68k/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/m68k/include/asm/mac_mouse.h b/arch/m68k/include/asm/mac_mouse.h deleted file mode 100644 index 
39a5c292eaee..000000000000 --- a/arch/m68k/include/asm/mac_mouse.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _ASM_MAC_MOUSE_H -#define _ASM_MAC_MOUSE_H - -/* - * linux/include/asm-m68k/mac_mouse.h - * header file for Macintosh ADB mouse driver - * 27-10-97 Michael Schmitz - * copied from: - * header file for Atari Mouse driver - * by Robert de Vries (robert@and.nl) on 19Jul93 - */ - -struct mouse_status { - char buttons; - short dx; - short dy; - int ready; - int active; - wait_queue_head_t wait; - struct fasync_struct *fasyncptr; -}; - -#endif diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h deleted file mode 100644 index 319899c47a2c..000000000000 --- a/arch/m68k/include/asm/mcfmbus.h +++ /dev/null @@ -1,77 +0,0 @@ -/****************************************************************************/ - -/* - * mcfmbus.h -- Coldfire MBUS support defines. - * - * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de) - */ - -/****************************************************************************/ - - -#ifndef mcfmbus_h -#define mcfmbus_h - - -#define MCFMBUS_BASE 0x280 -#define MCFMBUS_IRQ_VECTOR 0x19 -#define MCFMBUS_IRQ 0x1 -#define MCFMBUS_CLK 0x3f -#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/ -#define MCFMBUS_ADDRESS 0x01 - - -/* -* Define the 5307 MBUS register set addresses -*/ - -#define MCFMBUS_MADR 0x00 -#define MCFMBUS_MFDR 0x04 -#define MCFMBUS_MBCR 0x08 -#define MCFMBUS_MBSR 0x0C -#define MCFMBUS_MBDR 0x10 - - -#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/ - -#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/ - -/* -* Define bit flags in Control Register -*/ - -#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */ -#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */ -#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */ -#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */ -#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */ -#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */ - -/* -* Define bit flags in Status Register -*/ - -#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */ -#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */ -#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */ -#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */ -#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */ -#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */ -#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */ - -/* -* Define bit flags in DATA I/O Register -*/ - -#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */ - -#define MBUSIOCSCLOCK 1 -#define MBUSIOCGCLOCK 2 -#define MBUSIOCSADDR 3 -#define MBUSIOCGADDR 4 -#define MBUSIOCSSLADDR 5 -#define MBUSIOCGSLADDR 6 -#define MBUSIOCSSUBADDR 7 -#define MBUSIOCGSUBADDR 8 - -#endif diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/arch/m68k/include/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/mman.h> diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/arch/m68k/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. 
(see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h deleted file mode 100644 index 0859d048faf5..000000000000 --- a/arch/m68k/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_PERCPU_H -#define __ASM_M68K_PERCPU_H - -#include <asm-generic/percpu.h> - -#endif /* __ASM_M68K_PERCPU_H */ diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h deleted file mode 100644 index e7d35019f337..000000000000 --- a/arch/m68k/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_RESOURCE_H -#define _M68K_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif /* _M68K_RESOURCE_H */ diff --git a/arch/m68k/include/asm/sbus.h b/arch/m68k/include/asm/sbus.h deleted file mode 100644 index bfe3ba147f2e..000000000000 --- a/arch/m68k/include/asm/sbus.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * some sbus structures and macros to make usage of sbus drivers possible - */ - -#ifndef __M68K_SBUS_H -#define __M68K_SBUS_H - -struct sbus_dev { - struct { - unsigned int which_io; - unsigned int phys_addr; - } reg_addrs[1]; -}; - -/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */ -/* No SBUS on the Sun3, kludge -- sam */ - -static inline void _sbus_writeb(unsigned char val, unsigned long addr) -{ - *(volatile unsigned char *)addr = val; -} - -static inline unsigned char _sbus_readb(unsigned long addr) -{ - return *(volatile unsigned char *)addr; -} - -static inline void _sbus_writel(unsigned long val, unsigned long addr) -{ - *(volatile unsigned long *)addr = val; - -} - -extern inline unsigned long _sbus_readl(unsigned long addr) -{ - return *(volatile unsigned long *)addr; -} - - -#define sbus_readb(a) _sbus_readb((unsigned long)a) -#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a) -#define sbus_readl(a) _sbus_readl((unsigned long)a) -#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a) - -#endif diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h deleted file mode 100644 index 312505452a1e..000000000000 --- a/arch/m68k/include/asm/scatterlist.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SCATTERLIST_H -#define _M68K_SCATTERLIST_H - -#include <asm-generic/scatterlist.h> - -#endif /* !(_M68K_SCATTERLIST_H) */ diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h deleted file mode 100644 index 5277e52715ec..000000000000 --- a/arch/m68k/include/asm/sections.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_M68K_SECTIONS_H -#define _ASM_M68K_SECTIONS_H - -#include <asm-generic/sections.h> - -extern char _sbss[], _ebss[]; - -#endif /* _ASM_M68K_SECTIONS_H */ diff --git a/arch/m68k/include/asm/shm.h b/arch/m68k/include/asm/shm.h deleted file mode 100644 index fa56ec84a126..000000000000 --- a/arch/m68k/include/asm/shm.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _M68K_SHM_H -#define _M68K_SHM_H - - -/* format of page table entries that correspond to shared memory pages - currently out in swap space (see also mm/swap.c): - bits 0-1 (PAGE_PRESENT) is = 0 - bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE - bits 31..9 are used like this: - bits 15..9 (SHM_ID) the id of the shared memory segment - bits 30..16 (SHM_IDX) the index of the page within the shared memory segment - (actually only bits 25..16 get used since SHMMAX is so low) - bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach -*/ -/* on the m68k both bits 0 and 1 must be zero */ -/* 
format on the sun3 is similar, but bits 30, 31 are set to zero and all - others are reduced by 2. --m */ - -#ifndef CONFIG_SUN3 -#define SHM_ID_SHIFT 9 -#else -#define SHM_ID_SHIFT 7 -#endif -#define _SHM_ID_BITS 7 -#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) - -#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS) -#define _SHM_IDX_BITS 15 -#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) - -#endif /* _M68K_SHM_H */ diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h deleted file mode 100644 index 851d3d784b53..000000000000 --- a/arch/m68k/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SIGINFO_H -#define _M68K_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h deleted file mode 100644 index 08d93f14e061..000000000000 --- a/arch/m68k/include/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_STATFS_H -#define _M68K_STATFS_H - -#include <asm-generic/statfs.h> - -#endif /* _M68K_STATFS_H */ diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h deleted file mode 100644 index ca173e9f26ff..000000000000 --- a/arch/m68k/include/asm/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_TOPOLOGY_H -#define _ASM_M68K_TOPOLOGY_H - -#include <asm-generic/topology.h> - -#endif /* _ASM_M68K_TOPOLOGY_H */ diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h deleted file mode 100644 index 89705adcbd52..000000000000 --- a/arch/m68k/include/asm/types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _M68K_TYPES_H -#define _M68K_TYPES_H - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ -#include <asm-generic/int-ll64.h> - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#endif /* __KERNEL__ */ - -#endif /* _M68K_TYPES_H */ diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h index f4043ae63db1..2b3ca0bf7a0d 100644 --- a/arch/m68k/include/asm/unaligned.h +++ b/arch/m68k/include/asm/unaligned.h @@ -2,7 +2,7 @@ #define _ASM_M68K_UNALIGNED_H -#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000) +#ifdef CONFIG_CPU_HAS_NO_UNALIGNED #include <linux/unaligned/be_struct.h> #include <linux/unaligned/le_byteshift.h> #include <linux/unaligned/generic.h> @@ -12,7 +12,7 @@ #else /* - * The m68k can do unaligned accesses itself. + * The m68k can do unaligned accesses itself. */ #include <linux/unaligned/access_ok.h> #include <linux/unaligned/generic.h> diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/arch/m68k/include/asm/xor.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/xor.h> diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index 7dc186b7a85f..71fb29938dba 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. 
(Jate Sujjavanich)\n"); #endif - pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " - "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, - (int) &_sdata, (int) &_edata, - (int) &_sbss, (int) &_ebss); - pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ", - (int) &_ebss, (int) memory_start, - (int) memory_start, (int) memory_end); + pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n", + _stext, _etext, _sdata, _edata, __bss_start, __bss_stop); + pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", + __bss_stop, memory_start, memory_start, memory_end); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 8623f8dc16f8..9a5932ec3689 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, goto bad_access; } - mem_value = *mem; + /* + * No need to check for EFAULT; we know that the page is + * present and writable. + */ + __get_user(mem_value, mem); if (mem_value == oldval) - *mem = newval; + __put_user(newval, mem); pte_unmap_unlock(pte, ptl); up_read(&mm->mmap_sem); diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index 40e02d9c38b4..06a763f49fd3 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -78,9 +78,7 @@ SECTIONS { __init_end = .; } - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _end = .; diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 63407c836826..d0993594f558 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -31,9 +31,7 @@ SECTIONS RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _edata = .; /* End of data section */ diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index ad0f46d64c0b..8080469ee6c1 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -44,9 +44,7 @@ __init_begin = .; . = ALIGN(PAGE_SIZE); __init_end = .; - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _end = . ; diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c index 79e928a525d0..ee5f0b1b5c5d 100644 --- a/arch/m68k/lib/muldi3.c +++ b/arch/m68k/lib/muldi3.c @@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 #define SI_TYPE_SIZE 32 #define __BITS4 (SI_TYPE_SIZE / 4) diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c index f77f258dce3a..282f9de68966 100644 --- a/arch/m68k/mm/init_mm.c +++ b/arch/m68k/mm/init_mm.c @@ -104,7 +104,7 @@ void __init print_memmap(void) MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_stext, _etext), MLK_ROUNDUP(_sdata, _edata), - MLK_ROUNDUP(_sbss, _ebss)); + MLK_ROUNDUP(__bss_start, __bss_stop)); } void __init mem_init(void) diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c index 345ec0d83e3d..688e3664aea0 100644 --- a/arch/m68k/mm/init_no.c +++ b/arch/m68k/mm/init_no.c @@ -91,7 +91,7 @@ void __init mem_init(void) totalram_pages = free_all_bootmem(); codek = (_etext - _stext) >> 10; - datak = (_ebss - _sdata) >> 10; + datak = (__bss_stop - _sdata) >> 10; initk = (__init_begin - __init_end) >> 10; tmp = nr_free_pages() << PAGE_SHIFT; diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S index f632fdcb93e9..537d3245b539 100644 --- a/arch/m68k/platform/68328/head-de2.S +++ b/arch/m68k/platform/68328/head-de2.S @@ -60,8 +60,8 @@ _start: * Move ROM filesystem above bss :-) */ - moveal #_sbss, %a0 /* romfs at the start of bss */ - moveal #_ebss, %a1 /* Set up destination */ + moveal #__bss_start, %a0 /* romfs at the start of bss */ + moveal #__bss_stop, %a1 /* Set up destination */ movel %a0, %a2 /* Copy of bss start */ movel 8(%a0), %d1 /* Get size of ROMFS */ @@ -84,8 +84,8 @@ _start: * Initialize BSS segment to 0 */ - lea _sbss, %a0 - lea _ebss, %a1 + lea __bss_start, %a0 + lea __bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ 2: cmpal %a0, %a1 diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S index 2ebfd6420818..45a9dad29e3d 100644 --- a/arch/m68k/platform/68328/head-pilot.S +++ b/arch/m68k/platform/68328/head-pilot.S @@ -110,7 +110,7 @@ L0: movel #CONFIG_VECTORBASE, %d7 addl #16, %d7 moveal %d7, %a0 - moveal #_ebss, %a1 + moveal #__bss_stop, %a1 lea %a1@(512), %a2 DBG_PUTC('C') @@ -138,8 +138,8 @@ LD1: DBG_PUTC('E') - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -150,7 +150,7 @@ L1: DBG_PUTC('F') /* Copy command line from end of bss to command line */ - moveal #_ebss, %a0 + moveal #__bss_stop, %a0 moveal #command_line, %a1 lea %a1@(512), %a2 @@ -165,7 +165,7 @@ L3: movel #_sdata, %d0 movel %d0, _rambase - movel #_ebss, %d0 + movel #__bss_stop, %d0 movel %d0, _ramstart movel %a4, %d0 diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S index 7f1aeeacb219..5189ef926098 100644 --- a/arch/m68k/platform/68328/head-ram.S +++ b/arch/m68k/platform/68328/head-ram.S @@ -76,8 +76,8 @@ pclp3: beq pclp3 #endif /* DEBUG */ moveal #0x007ffff0, %ssp - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 >= %a1 */ L1: diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S index a5ff96d0295f..3dff98ba2e97 100644 --- a/arch/m68k/platform/68328/head-rom.S +++ b/arch/m68k/platform/68328/head-rom.S @@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr cmpal %a1, %a2 bhi 1b - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ 1: @@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr movel #_sdata, %d0 movel %d0, _rambase 
- movel #_ebss, %d0 + movel #__bss_stop, %d0 movel %d0, _ramstart movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0 movel %d0, _ramend diff --git a/arch/m68k/platform/68360/head-ram.S b/arch/m68k/platform/68360/head-ram.S index 8eb94fb6b971..acd213170d80 100644 --- a/arch/m68k/platform/68360/head-ram.S +++ b/arch/m68k/platform/68360/head-ram.S @@ -219,8 +219,8 @@ LD1: cmp.l #_edata, %a1 blt LD1 - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -234,7 +234,7 @@ load_quicc: store_ram_size: /* Set ram size information */ move.l #_sdata, _rambase - move.l #_ebss, _ramstart + move.l #__bss_stop, _ramstart move.l #RAMEND, %d0 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ move.l %d0, _ramend /* Different from RAMEND.*/ diff --git a/arch/m68k/platform/68360/head-rom.S b/arch/m68k/platform/68360/head-rom.S index 97510e55b802..dfc756d99886 100644 --- a/arch/m68k/platform/68360/head-rom.S +++ b/arch/m68k/platform/68360/head-rom.S @@ -13,7 +13,7 @@ */ .global _stext -.global _sbss +.global __bss_start .global _start .global _rambase @@ -229,8 +229,8 @@ LD1: cmp.l #_edata, %a1 blt LD1 - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -244,7 +244,7 @@ load_quicc: store_ram_size: /* Set ram size information */ move.l #_sdata, _rambase - move.l #_ebss, _ramstart + move.l #__bss_stop, _ramstart move.l #RAMEND, %d0 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ move.l %d0, _ramend /* Different from RAMEND.*/ diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S index 4e0c9eb3bd1f..b88f5716f357 100644 --- a/arch/m68k/platform/coldfire/head.S +++ b/arch/m68k/platform/coldfire/head.S @@ -230,8 +230,8 @@ _vstart: /* * Move ROM filesystem above bss :-) */ - lea _sbss,%a0 /* get start of bss */ - lea _ebss,%a1 /* set up destination */ + lea __bss_start,%a0 /* get start of bss */ + lea __bss_stop,%a1 /* set up destination */ movel %a0,%a2 /* copy of bss start */ movel 8(%a0),%d0 /* get size of ROMFS */ @@ -249,7 +249,7 @@ _copy_romfs: bne _copy_romfs #else /* CONFIG_ROMFS_FS */ - lea _ebss,%a1 + lea __bss_stop,%a1 movel %a1,_ramstart #endif /* CONFIG_ROMFS_FS */ @@ -257,8 +257,8 @@ _copy_romfs: /* * Zero out the bss region. */ - lea _sbss,%a0 /* get start of bss */ - lea _ebss,%a1 /* get end of bss */ + lea __bss_start,%a0 /* get start of bss */ + lea __bss_stop,%a1 /* get end of bss */ clrl %d0 /* set value */ _clear_bss: movel %d0,(%a0)+ /* clear each word */ diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c index d8e6349336b4..eeba067d565f 100644 --- a/arch/m68k/sun3/prom/init.c +++ b/arch/m68k/sun3/prom/init.c @@ -22,57 +22,13 @@ int prom_root_node; struct linux_nodeops *prom_nodeops; /* You must call prom_init() before you attempt to use any of the - * routines in the prom library. It returns 0 on success, 1 on - * failure. It gets passed the pointer to the PROM vector. + * routines in the prom library. + * It gets passed the pointer to the PROM vector. 
*/ -extern void prom_meminit(void); -extern void prom_ranges_init(void); - void __init prom_init(struct linux_romvec *rp) { romvec = rp; -#ifndef CONFIG_SUN3 - switch(romvec->pv_romvers) { - case 0: - prom_vers = PROM_V0; - break; - case 2: - prom_vers = PROM_V2; - break; - case 3: - prom_vers = PROM_V3; - break; - case 4: - prom_vers = PROM_P1275; - prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n"); - prom_halt(); - break; - default: - prom_printf("PROMLIB: Bad PROM version %d\n", - romvec->pv_romvers); - prom_halt(); - break; - }; - - prom_rev = romvec->pv_plugin_revision; - prom_prev = romvec->pv_printrev; - prom_nodeops = romvec->pv_nodeops; - - prom_root_node = prom_getsibling(0); - if((prom_root_node == 0) || (prom_root_node == -1)) - prom_halt(); - - if((((unsigned long) prom_nodeops) == 0) || - (((unsigned long) prom_nodeops) == -1)) - prom_halt(); - - prom_meminit(); - - prom_ranges_init(); -#endif -// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n", -// romvec->pv_romvers, prom_rev); /* Initialization successful. */ return; diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h index 4487e150b455..c07ed5d2a820 100644 --- a/arch/microblaze/include/asm/sections.h +++ b/arch/microblaze/include/asm/sections.h @@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[]; extern unsigned long __ivt_start[], __ivt_end[]; extern char _etext[], _stext[]; -# ifdef CONFIG_MTD_UCLINUX -extern char *_ebss; -# endif - extern u32 _fdt_start[], _fdt_end[]; # endif /* !__ASSEMBLY__ */ diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index bb4907c828dc..2b25bcf05c00 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -21,9 +21,6 @@ #include <linux/ftrace.h> #include <linux/uaccess.h> -extern char *_ebss; -EXPORT_SYMBOL_GPL(_ebss); - #ifdef CONFIG_FUNCTION_TRACER extern void _mcount(void); EXPORT_SYMBOL(_mcount); diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 16d8dfd9094b..4da971d4392f 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, /* Move ROMFS out of BSS before clearing it */ if (romfs_size > 0) { - memmove(&_ebss, (int *)romfs_base, romfs_size); + memmove(&__bss_stop, (int *)romfs_base, romfs_size); klimit += romfs_size; } #endif @@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, BUG_ON(romfs_size < 0); /* What else can we do? */ printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", - romfs_size, romfs_base, (unsigned)&_ebss); + romfs_size, romfs_base, (unsigned)&__bss_stop); printk("New klimit: 0x%08x\n", (unsigned)klimit); #endif diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 109e9d86ade4..936d01a689d7 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -131,7 +131,6 @@ SECTIONS { *(COMMON) . = ALIGN (4) ; __bss_stop = . ; - _ebss = . ; } . = ALIGN(PAGE_SIZE); _end = .; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e3efc06e6409..331d574df99c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -77,6 +77,7 @@ config AR7 select SYS_SUPPORTS_ZBOOT_UART16550 select ARCH_REQUIRE_GPIOLIB select VLYNQ + select HAVE_CLK help Support for the Texas Instruments AR7 System-on-a-Chip family: TNETD7100, 7200 and 7300. 
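
Editorial note: the m68k and microblaze hunks above drop the architecture-private _sbss/_ebss markers in favour of the generic __bss_start/__bss_stop symbols that the common BSS_SECTION linker-script macro already emits. As a hedged illustration only (not part of this patch set), generic early-boot C code can reference those bounds directly; the declarations are assumed to come from asm-generic/sections.h, and the function name below is purely illustrative:

	#include <linux/init.h>
	#include <linux/string.h>
	#include <asm/sections.h>	/* assumed to declare __bss_start[], __bss_stop[] */

	static void __init clear_bss_sketch(void)
	{
		/* Zero the BSS once, before any C code relies on static data. */
		memset(__bss_start, 0, __bss_stop - __bss_start);
	}

Keeping the generic names lets the 68328/ColdFire head assembly and C users such as print_memmap() share the same symbols instead of per-architecture section declarations.
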
@@ -124,6 +125,7 @@ config BCM63XX select SYS_HAS_EARLY_PRINTK select SWAP_IO_SPACE select ARCH_REQUIRE_GPIOLIB + select HAVE_CLK help Support for BCM63XX based boards diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h index 83894aa7932c..c9456e7a7283 100644 --- a/arch/mips/include/asm/clock.h +++ b/arch/mips/include/asm/clock.h @@ -50,15 +50,4 @@ void clk_recalc_rate(struct clk *); int clk_register(struct clk *); void clk_unregister(struct clk *); -/* the exported API, in addition to clk_set_rate */ -/** - * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter - * @clk: clock source - * @rate: desired clock rate in Hz - * @algo_id: algorithm id to be passed down to ops->set_rate - * - * Returns success (0) or negative errno. - */ -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id); - #endif /* __ASM_MIPS_CLOCK_H */ diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h index 06367c37e1b2..5222a007bc21 100644 --- a/arch/mips/include/asm/mach-loongson/loongson.h +++ b/arch/mips/include/asm/mach-loongson/loongson.h @@ -245,7 +245,6 @@ static inline void do_perfcnt_IRQ(void) #ifdef CONFIG_CPU_SUPPORTS_CPUFREQ #include <linux/cpufreq.h> -extern void loongson2_cpu_wait(void); extern struct cpufreq_frequency_table loongson2_clockmod_table[]; /* Chip Config */ diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile index c3479a432efe..05a5715ee38c 100644 --- a/arch/mips/kernel/cpufreq/Makefile +++ b/arch/mips/kernel/cpufreq/Makefile @@ -2,4 +2,4 @@ # Makefile for the Linux/MIPS cpufreq. # -obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o +obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index ae5db206347c..e7c98e2b78b6 100644 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -19,7 +19,7 @@ #include <asm/clock.h> -#include <loongson.h> +#include <asm/mach-loongson/loongson.h> static uint nowait; @@ -181,6 +181,25 @@ static struct platform_driver platform_driver = { .id_table = platform_device_ids, }; +/* + * This is the simple version of Loongson-2 wait, Maybe we need do this in + * interrupt disabled context. 
+ */ + +static DEFINE_SPINLOCK(loongson2_wait_lock); + +static void loongson2_cpu_wait(void) +{ + unsigned long flags; + u32 cpu_freq; + + spin_lock_irqsave(&loongson2_wait_lock, flags); + cpu_freq = LOONGSON_CHIPCFG0; + LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + spin_unlock_irqrestore(&loongson2_wait_lock, flags); +} + static int __init cpufreq_init(void) { int ret; diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index d3bcc33f4699..ce2f129b081f 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -135,6 +135,11 @@ void clk_deactivate(struct clk *clk) } EXPORT_SYMBOL(clk_deactivate); +struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) +{ + return NULL; +} + static inline u32 get_counter_resolution(void) { u32 res; diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index d185e8477fdf..6cfd6117fbfd 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -8,7 +8,10 @@ #include <linux/export.h> #include <linux/clk.h> +#include <linux/bootmem.h> #include <linux/of_platform.h> +#include <linux/of_fdt.h> + #include <asm/bootinfo.h> #include <asm/time.h> @@ -70,6 +73,25 @@ void __init plat_mem_setup(void) __dt_setup_arch(&__dtb_start); } +void __init device_tree_init(void) +{ + unsigned long base, size; + + if (!initial_boot_params) + return; + + base = virt_to_phys((void *)initial_boot_params); + size = be32_to_cpu(initial_boot_params->totalsize); + + /* Before we do anything, lets reserve the dt blob */ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ + free_bootmem(base, size); +} + void __init prom_init(void) { /* call the soc specific detetcion code and get it to fill soc_info */ diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 83780f7c842b..befbb760ab76 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -20,10 +20,12 @@ /* clock control register */ #define CGU_IFCCR 0x0018 +#define CGU_IFCCR_VR9 0x0024 /* system clock register */ #define CGU_SYS 0x0010 /* pci control register */ #define CGU_PCICR 0x0034 +#define CGU_PCICR_VR9 0x0038 /* ephy configuration register */ #define CGU_EPHY 0x10 /* power control register */ @@ -80,6 +82,9 @@ static void __iomem *pmu_membase; void __iomem *ltq_cgu_membase; void __iomem *ltq_ebu_membase; +static u32 ifccr = CGU_IFCCR; +static u32 pcicr = CGU_PCICR; + /* legacy function kept alive to ease clkdev transition */ void ltq_pmu_enable(unsigned int module) { @@ -103,14 +108,14 @@ EXPORT_SYMBOL(ltq_pmu_disable); /* enable a hw clock */ static int cgu_enable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr); return 0; } /* disable a hw clock */ static void cgu_disable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr); } /* enable a clock gate */ @@ -138,22 +143,22 @@ static void pmu_disable(struct clk *clk) /* the pci enable helper */ static int pci_enable(struct clk *clk) { - unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR); + unsigned int val = ltq_cgu_r32(ifccr); /* set bus clock speed */ if (of_machine_is_compatible("lantiq,ar9")) { - ifccr &= ~0x1f00000; + val &= ~0x1f00000; if (clk->rate == CLOCK_33M) - ifccr |= 0xe00000; + val |= 0xe00000; else - ifccr |= 0x700000; /* 62.5M */ + val |= 0x700000; /* 62.5M 
*/ } else { - ifccr &= ~0xf00000; + val &= ~0xf00000; if (clk->rate == CLOCK_33M) - ifccr |= 0x800000; + val |= 0x800000; else - ifccr |= 0x400000; /* 62.5M */ + val |= 0x400000; /* 62.5M */ } - ltq_cgu_w32(ifccr, CGU_IFCCR); + ltq_cgu_w32(val, ifccr); pmu_enable(clk); return 0; } @@ -161,18 +166,16 @@ static int pci_enable(struct clk *clk) /* enable the external clock as a source */ static int pci_ext_enable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16), - CGU_IFCCR); - ltq_cgu_w32((1 << 30), CGU_PCICR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr); + ltq_cgu_w32((1 << 30), pcicr); return 0; } /* disable the external clock as a source */ static void pci_ext_disable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16), - CGU_IFCCR); - ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr); + ltq_cgu_w32((1 << 31) | (1 << 30), pcicr); } /* enable a clockout source */ @@ -184,11 +187,11 @@ static int clkout_enable(struct clk *clk) for (i = 0; i < 4; i++) { if (clk->rates[i] == clk->rate) { int shift = 14 - (2 * clk->module); - unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR); + unsigned int val = ltq_cgu_r32(ifccr); - ifccr &= ~(3 << shift); - ifccr |= i << shift; - ltq_cgu_w32(ifccr, CGU_IFCCR); + val &= ~(3 << shift); + val |= i << shift; + ltq_cgu_w32(val, ifccr); return 0; } } @@ -336,8 +339,12 @@ void __init ltq_soc_init(void) clkdev_add_clkout(); /* add the soc dependent clocks */ - if (!of_machine_is_compatible("lantiq,vr9")) + if (of_machine_is_compatible("lantiq,vr9")) { + ifccr = CGU_IFCCR_VR9; + pcicr = CGU_PCICR_VR9; + } else { clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE); + } if (!of_machine_is_compatible("lantiq,ase")) { clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1); diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index aca93eed8779..263beb9322a8 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -41,6 +41,7 @@ config LEMOTE_MACH2F select CSRC_R4K if ! MIPS_EXTERNAL_TIMER select DMA_NONCOHERENT select GENERIC_ISA_DMA_SUPPORT_BROKEN + select HAVE_CLK select HW_HAS_PCI select I8259 select IRQ_CPU diff --git a/arch/mips/loongson/lemote-2f/Makefile b/arch/mips/loongson/lemote-2f/Makefile index 8699a53f0477..4f9eaa328a16 100644 --- a/arch/mips/loongson/lemote-2f/Makefile +++ b/arch/mips/loongson/lemote-2f/Makefile @@ -2,7 +2,7 @@ # Makefile for lemote loongson2f family machines # -obj-y += machtype.o irq.o reset.o ec_kb3310b.o +obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o # # Suspend Support diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/loongson/lemote-2f/clock.c index 5426779d9fdb..bc739d4bab2e 100644 --- a/arch/mips/kernel/cpufreq/loongson2_clock.c +++ b/arch/mips/loongson/lemote-2f/clock.c @@ -6,14 +6,17 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ - -#include <linux/module.h> +#include <linux/clk.h> #include <linux/cpufreq.h> -#include <linux/platform_device.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> #include <asm/clock.h> - -#include <loongson.h> +#include <asm/mach-loongson/loongson.h> static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); @@ -89,12 +92,6 @@ EXPORT_SYMBOL(clk_put); int clk_set_rate(struct clk *clk, unsigned long rate) { - return clk_set_rate_ex(clk, rate, 0); -} -EXPORT_SYMBOL_GPL(clk_set_rate); - -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) -{ int ret = 0; int regval; int i; @@ -103,7 +100,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) unsigned long flags; spin_lock_irqsave(&clock_lock, flags); - ret = clk->ops->set_rate(clk, rate, algo_id); + ret = clk->ops->set_rate(clk, rate, 0); spin_unlock_irqrestore(&clock_lock, flags); } @@ -129,7 +126,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) return ret; } -EXPORT_SYMBOL_GPL(clk_set_rate_ex); +EXPORT_SYMBOL_GPL(clk_set_rate); long clk_round_rate(struct clk *clk, unsigned long rate) { @@ -146,26 +143,3 @@ long clk_round_rate(struct clk *clk, unsigned long rate) return rate; } EXPORT_SYMBOL_GPL(clk_round_rate); - -/* - * This is the simple version of Loongson-2 wait, Maybe we need do this in - * interrupt disabled content - */ - -DEFINE_SPINLOCK(loongson2_wait_lock); -void loongson2_cpu_wait(void) -{ - u32 cpu_freq; - unsigned long flags; - - spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ - spin_unlock_irqrestore(&loongson2_wait_lock, flags); -} -EXPORT_SYMBOL_GPL(loongson2_cpu_wait); - -MODULE_AUTHOR("Yanhua <yanh@lemote.com>"); -MODULE_DESCRIPTION("cpufreq driver for Loongson 2F"); -MODULE_LICENSE("GPL"); diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig index 237fa214de9f..a9a14d6e81af 100644 --- a/arch/mips/loongson1/Kconfig +++ b/arch/mips/loongson1/Kconfig @@ -15,6 +15,7 @@ config LOONGSON1_LS1B select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM select SYS_HAS_EARLY_PRINTK + select HAVE_CLK endchoice diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c index 2d98fb030596..1bbbbec12085 100644 --- a/arch/mips/loongson1/common/clock.c +++ b/arch/mips/loongson1/common/clock.c @@ -38,12 +38,28 @@ struct clk *clk_get(struct device *dev, const char *name) } EXPORT_SYMBOL(clk_get); +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + unsigned long clk_get_rate(struct clk *clk) { return clk->rate; } EXPORT_SYMBOL(clk_get_rate); +void clk_put(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_put); + static void pll_clk_init(struct clk *clk) { u32 pll; diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig index 852ae4bb7a85..6d40bc783459 100644 --- a/arch/mips/txx9/Kconfig +++ b/arch/mips/txx9/Kconfig @@ -20,6 +20,7 @@ config MACH_TXX9 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_BIG_ENDIAN + select HAVE_CLK config TOSHIBA_JMR3927 bool "Toshiba JMR-TX3927 board" diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index d544d7816df3..dba1ce235da5 100644 --- 
a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -186,10 +186,13 @@ static void spufs_prune_dir(struct dentry *dir) static int spufs_rmdir(struct inode *parent, struct dentry *dir) { /* remove all entries */ + int res; spufs_prune_dir(dir); d_drop(dir); - - return simple_rmdir(parent, dir); + res = simple_rmdir(parent, dir); + /* We have to give up the mm_struct */ + spu_forget(SPUFS_I(dir->d_inode)->i_ctx); + return res; } static int spufs_fill_dir(struct dentry *dir, @@ -245,9 +248,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file) mutex_unlock(&parent->i_mutex); WARN_ON(ret); - /* We have to give up the mm_struct */ - spu_forget(ctx); - return dcache_dir_close(inode, file); } @@ -450,28 +450,24 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; - ret = -EPERM; if ((flags & SPU_CREATE_NOSCHED) && !capable(CAP_SYS_NICE)) - goto out_unlock; + return -EPERM; - ret = -EINVAL; if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE)) == SPU_CREATE_ISOLATE) - goto out_unlock; + return -EINVAL; - ret = -ENODEV; if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) - goto out_unlock; + return -ENODEV; gang = NULL; neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { gang = SPUFS_I(inode)->i_gang; - ret = -EINVAL; if (!gang) - goto out_unlock; + return -EINVAL; mutex_lock(&gang->aff_mutex); neighbor = spufs_assert_affinity(flags, gang, aff_filp); if (IS_ERR(neighbor)) { @@ -492,22 +488,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_context_open(&path); - if (ret < 0) { + if (ret < 0) WARN_ON(spufs_rmdir(inode, dentry)); - if (affinity) - mutex_unlock(&gang->aff_mutex); - mutex_unlock(&inode->i_mutex); - spu_forget(SPUFS_I(dentry->d_inode)->i_ctx); - goto out; - } out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); -out_unlock: - mutex_unlock(&inode->i_mutex); -out: - dput(dentry); return ret; } @@ -580,18 +566,13 @@ static int spufs_create_gang(struct inode *inode, int ret; ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO); - if (ret) - goto out; - - ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); + if (!ret) { + ret = spufs_gang_open(&path); + if (ret < 0) { + int err = simple_rmdir(inode, dentry); + WARN_ON(err); + } } - -out: - mutex_unlock(&inode->i_mutex); - dput(dentry); return ret; } @@ -601,40 +582,32 @@ static struct file_system_type spufs_type; long spufs_create(struct path *path, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp) { + struct inode *dir = path->dentry->d_inode; int ret; - ret = -EINVAL; /* check if we are on spufs */ if (path->dentry->d_sb->s_type != &spufs_type) - goto out; + return -EINVAL; /* don't accept undefined flags */ if (flags & (~SPU_CREATE_FLAG_ALL)) - goto out; + return -EINVAL; /* only threads can be underneath a gang */ - if (path->dentry != path->dentry->d_sb->s_root) { - if ((flags & SPU_CREATE_GANG) || - !SPUFS_I(path->dentry->d_inode)->i_gang) - goto out; - } + if (path->dentry != path->dentry->d_sb->s_root) + if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang) + return -EINVAL; mode &= ~current_umask(); if (flags & SPU_CREATE_GANG) - ret = spufs_create_gang(path->dentry->d_inode, - dentry, path->mnt, mode); + ret = spufs_create_gang(dir, dentry, path->mnt, mode); else - ret = 
spufs_create_context(path->dentry->d_inode, - dentry, path->mnt, flags, mode, + ret = spufs_create_context(dir, dentry, path->mnt, flags, mode, filp); if (ret >= 0) - fsnotify_mkdir(path->dentry->d_inode, dentry); - return ret; + fsnotify_mkdir(dir, dentry); -out: - mutex_unlock(&path->dentry->d_inode->i_mutex); - dput(dentry); return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 5665dcc382c7..5b7d8ffbf890 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); - path_put(&path); + done_path_create(&path, dentry); } return ret; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 76de6b68487c..107610e01a29 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -124,6 +124,7 @@ config S390 select GENERIC_TIME_VSYSCALL select GENERIC_CLOCKEVENTS select KTIME_SCALAR if 32BIT + select HAVE_ARCH_SECCOMP_FILTER config SCHED_OMIT_FRAME_POINTER def_bool y diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h index 0fb34027d3f6..a60d085ddb4d 100644 --- a/arch/s390/include/asm/sparsemem.h +++ b/arch/s390/include/asm/sparsemem.h @@ -4,13 +4,11 @@ #ifdef CONFIG_64BIT #define SECTION_SIZE_BITS 28 -#define MAX_PHYSADDR_BITS 46 #define MAX_PHYSMEM_BITS 46 #else #define SECTION_SIZE_BITS 25 -#define MAX_PHYSADDR_BITS 31 #define MAX_PHYSMEM_BITS 31 #endif /* CONFIG_64BIT */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index fb214dd9b7e0..fe7b99759e12 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -12,6 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 +#include <linux/audit.h> #include <linux/sched.h> #include <linux/err.h> #include <asm/ptrace.h> @@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task, regs->orig_gpr2 = args[0]; } +static inline int syscall_get_arch(struct task_struct *task, + struct pt_regs *regs) +{ +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) + return AUDIT_ARCH_S390; +#endif + return sizeof(long) == 8 ? 
AUDIT_ARCH_S390X : AUDIT_ARCH_S390; +} #endif /* _ASM_SYSCALL_H */ diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index d1225089a4bb..f606d935f495 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } @@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index e835d6d5b7fd..2d82cfcbce5b 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_readv + jg compat_sys_process_vm_readv ENTRY(compat_sys_process_vm_writev_wrapper) lgfr %r2,%r2 # compat_pid_t @@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_writev + jg compat_sys_process_vm_writev diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index f4eb37680b91..e4be113fbac6 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; /* Do the secure computing check first. */ - secure_computing_strict(regs->gprs[2]); + if (secure_computing(regs->gprs[2])) { + /* seccomp failures shouldn't expose any additional code. */ + ret = -1; + goto out; + } /* * The sysc_tracesys code in entry.S stored the system @@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], regs->gprs[5]); +out: return ret ?: regs->gprs[2]; } diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index b4a29eee41b8..d0964d22adb5 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { unsigned int ret; - if (current->personality == PER_LINUX32 && personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig index 7048c03490d9..fb5805745ace 100644 --- a/arch/sh/boards/Kconfig +++ b/arch/sh/boards/Kconfig @@ -57,6 +57,7 @@ config SH_7724_SOLUTION_ENGINE depends on CPU_SUBTYPE_SH7724 select ARCH_REQUIRE_GPIOLIB select SND_SOC_AK4642 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select 7724 SolutionEngine if configuring for a Hitachi SH7724 evaluation board. 
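
Editorial note: the sys_s390_personality() hunk earlier in this section switches from whole-word comparisons to the personality() macro because the personality value packs a low "type" byte together with high flag bits, and comparing the full word silently breaks once any flag is set. A minimal sketch of the corrected mapping follows, assuming only that linux/personality.h provides the personality() mask helper; the function name is illustrative:

	#include <linux/personality.h>

	static unsigned int map_personality_sketch(unsigned int current_pers,
						   unsigned int requested)
	{
		/* Compare only the type byte; preserve caller-supplied flag bits. */
		if (personality(current_pers) == PER_LINUX32 &&
		    personality(requested) == PER_LINUX)
			requested |= PER_LINUX32;
		return requested;
	}

The return path in the patch applies the same idea in reverse, clearing only the PER_LINUX32 bit rather than overwriting the whole word.
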
@@ -140,6 +141,7 @@ config SH_RSK bool "Renesas Starter Kit" depends on CPU_SUBTYPE_SH7201 || CPU_SUBTYPE_SH7203 || \ CPU_SUBTYPE_SH7264 || CPU_SUBTYPE_SH7269 + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select this option if configuring for any of the RSK+ MCU evaluation platforms. @@ -159,6 +161,7 @@ config SH_SDK7786 select NO_IOPORT if !PCI select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_SRAM_POOL + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select SDK7786 if configuring for a Renesas Technology Europe SH7786-65nm board. @@ -173,6 +176,7 @@ config SH_SH7757LCR bool "SH7757LCR" depends on CPU_SUBTYPE_SH7757 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR config SH_SH7785LCR bool "SH7785LCR" @@ -206,6 +210,7 @@ config SH_MIGOR bool "Migo-R" depends on CPU_SUBTYPE_SH7722 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select Migo-R if configuring for the SH7722 Migo-R platform by Renesas System Solutions Asia Pte. Ltd. @@ -214,6 +219,7 @@ config SH_AP325RXA bool "AP-325RXA" depends on CPU_SUBTYPE_SH7723 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Renesas "AP-325RXA" support. Compatible with ALGO SYSTEM CO.,LTD. "AP-320A" @@ -222,6 +228,7 @@ config SH_KFR2R09 bool "KFR2R09" depends on CPU_SUBTYPE_SH7724 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help "Kit For R2R for 2009" support. @@ -230,6 +237,7 @@ config SH_ECOVEC depends on CPU_SUBTYPE_SH7724 select ARCH_REQUIRE_GPIOLIB select SND_SOC_DA7210 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Renesas "R0P7724LC0011/21RL (EcoVec)" support. @@ -305,6 +313,7 @@ config SH_MAGIC_PANEL_R2 bool "Magic Panel R2" depends on CPU_SUBTYPE_SH7720 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select Magic Panel R2 if configuring for Magic Panel R2. @@ -316,6 +325,7 @@ config SH_CAYMAN config SH_POLARIS bool "SMSC Polaris" select CPU_HAS_IPR_IRQ + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7709 help Select if configuring for an SMSC Polaris development board @@ -323,6 +333,7 @@ config SH_POLARIS config SH_SH2007 bool "SH-2007 board" select NO_IOPORT + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7780 help SH-2007 is a single-board computer based around SH7780 chip @@ -334,6 +345,7 @@ config SH_SH2007 config SH_APSH4A3A bool "AP-SH4A-3A" select SH_ALPHA_BOARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7785 help Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A. @@ -342,6 +354,7 @@ config SH_APSH4AD0A bool "AP-SH4AD-0A" select SH_ALPHA_BOARD select SYS_SUPPORTS_PCI + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7786 help Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A. 
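
Editorial note: the REGULATOR_FIXED_VOLTAGE selections above exist because the smsc911x and SDHI/MMCIF drivers now look up their supplies through the regulator framework; boards whose rails are not software-controlled therefore register fixed dummy or always-on supplies, which is exactly what the board-file diffs that follow do. A condensed sketch of that pattern (board and variable names are illustrative, not taken from the patch):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/regulator/fixed.h>
	#include <linux/regulator/machine.h>

	/* Dummy supplies for a soldered-down smsc911x whose voltage is fixed. */
	static struct regulator_consumer_supply example_dummy_supplies[] = {
		REGULATOR_SUPPLY("vddvario", "smsc911x"),
		REGULATOR_SUPPLY("vdd33a", "smsc911x"),
	};

	static int __init example_board_devices_setup(void)
	{
		/* Register the dummies so the Ethernet driver's supply lookups succeed. */
		regulator_register_fixed(0, example_dummy_supplies,
					 ARRAY_SIZE(example_dummy_supplies));
		return 0;
	}

Boards with a genuinely fixed 3.3 V card-power rail use regulator_register_always_on() with vmmc/vqmmc consumers instead, as the SDHI and MMCIF hunks below show.
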
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c index 2823619c6006..0a39c241628a 100644 --- a/arch/sh/boards/board-apsh4a3a.c +++ b/arch/sh/boards/board-apsh4a3a.c @@ -13,6 +13,8 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mtd/physmap.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/irq.h> #include <linux/clk.h> @@ -66,6 +68,12 @@ static struct platform_device nor_flash_device = { .resource = nor_flash_resources, }; +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -105,6 +113,8 @@ static struct platform_device *apsh4a3a_devices[] __initdata = { static int __init apsh4a3a_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(apsh4a3a_devices, ARRAY_SIZE(apsh4a3a_devices)); } diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c index b4d6292a9247..92eac3a99187 100644 --- a/arch/sh/boards/board-apsh4ad0a.c +++ b/arch/sh/boards/board-apsh4ad0a.c @@ -12,12 +12,20 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/irq.h> #include <linux/clk.h> #include <asm/machvec.h> #include <asm/sizes.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -56,6 +64,8 @@ static struct platform_device *apsh4ad0a_devices[] __initdata = { static int __init apsh4ad0a_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(apsh4ad0a_devices, ARRAY_SIZE(apsh4ad0a_devices)); } diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c index 90568f9de3a4..20500858b56c 100644 --- a/arch/sh/boards/board-magicpanelr2.c +++ b/arch/sh/boards/board-magicpanelr2.c @@ -14,6 +14,8 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/gpio.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> @@ -24,6 +26,12 @@ #include <asm/heartbeat.h> #include <cpu/sh7720.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + #define LAN9115_READY (__raw_readl(0xA8000084UL) & 0x00000001UL) /* Wait until reset finished. Timeout is 100ms. 
*/ @@ -348,6 +356,8 @@ static struct platform_device *mpr2_devices[] __initdata = { static int __init mpr2_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices)); } device_initcall(mpr2_devices_setup); diff --git a/arch/sh/boards/board-polaris.c b/arch/sh/boards/board-polaris.c index 0978ae2e4847..37a08d094727 100644 --- a/arch/sh/boards/board-polaris.c +++ b/arch/sh/boards/board-polaris.c @@ -9,6 +9,8 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/io.h> #include <asm/irq.h> @@ -22,6 +24,12 @@ #define AREA5_WAIT_CTRL (0x1C00) #define WAIT_STATES_10 (0x7) +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -88,6 +96,8 @@ static int __init polaris_initialise(void) printk(KERN_INFO "Configuring Polaris external bus\n"); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* Configure area 5 with 2 wait states */ wcr = __raw_readw(WCR2); wcr &= (~AREA5_WAIT_CTRL); diff --git a/arch/sh/boards/board-sh2007.c b/arch/sh/boards/board-sh2007.c index b90b78f6a829..1980bb7e5780 100644 --- a/arch/sh/boards/board-sh2007.c +++ b/arch/sh/boards/board-sh2007.c @@ -6,6 +6,8 @@ */ #include <linux/init.h> #include <linux/irq.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> @@ -13,6 +15,14 @@ #include <asm/machvec.h> #include <mach/sh2007.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), + REGULATOR_SUPPLY("vddvario", "smsc911x.1"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.1"), +}; + struct smsc911x_platform_config smc911x_info = { .flags = SMSC911X_USE_32BIT, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, @@ -98,6 +108,8 @@ static struct platform_device *sh2007_devices[] __initdata = { static int __init sh2007_io_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + platform_add_devices(sh2007_devices, ARRAY_SIZE(sh2007_devices)); return 0; } diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 5087f8bb4cff..41f86702eb9f 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c @@ -12,6 +12,8 @@ #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/irq.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/io.h> @@ -199,6 +201,15 @@ static struct platform_device sh7757_eth_giga1_device = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0, MMCIF */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; + /* SH_MMCIF */ static struct resource sh_mmcif_resources[] = { [0] = { @@ -329,6 +340,9 @@ 
static struct spi_board_info spi_board_info[] = { static int __init sh7757lcr_devices_setup(void) { + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* RGMII (PTA) */ gpio_request(GPIO_FN_ET0_MDC, NULL); gpio_request(GPIO_FN_ET0_MDIO, NULL); diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index f33ebf447073..9e963c1d1447 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -20,6 +20,8 @@ #include <linux/mtd/sh_flctl.h> #include <linux/delay.h> #include <linux/i2c.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/gpio.h> #include <linux/videodev2.h> @@ -34,6 +36,12 @@ #include <asm/suspend.h> #include <cpu/sh7723.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, @@ -423,6 +431,15 @@ static struct platform_device ceu_device = { }, }; +/* Fixed 3.3V regulators to be used by SDHI0, SDHI1 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + static struct resource sdhi0_cn3_resources[] = { [0] = { .name = "SDHI0", @@ -544,6 +561,10 @@ static int __init ap325rxa_devices_setup(void) &ap325rxa_sdram_leave_start, &ap325rxa_sdram_leave_end); + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* LD3 and LD4 LEDs */ gpio_request(GPIO_PTX5, NULL); /* RUN */ gpio_direction_output(GPIO_PTX5, 1); diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 4158d70c0dea..64559e8af14b 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -19,6 +19,8 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/usb/r8a66597.h> #include <linux/usb/renesas_usbhs.h> #include <linux/i2c.h> @@ -242,9 +244,17 @@ static int usbhs_get_id(struct platform_device *pdev) return gpio_get_value(GPIO_PTB3); } +static void usbhs_phy_reset(struct platform_device *pdev) +{ + /* enable vbus if HOST */ + if (!gpio_get_value(GPIO_PTB3)) + gpio_set_value(GPIO_PTB5, 1); +} + static struct renesas_usbhs_platform_info usbhs_info = { .platform_callback = { .get_id = usbhs_get_id, + .phy_reset = usbhs_phy_reset, }, .driver_param = { .buswait_bwait = 4, @@ -518,10 +528,86 @@ static struct i2c_board_info ts_i2c_clients = { .irq = IRQ0, }; +static struct regulator_consumer_supply cn12_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + +static struct regulator_init_data cn12_power_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 
ARRAY_SIZE(cn12_power_consumers), + .consumer_supplies = cn12_power_consumers, +}; + +static struct fixed_voltage_config cn12_power_info = { + .supply_name = "CN12 SD/MMC Vdd", + .microvolts = 3300000, + .gpio = GPIO_PTB7, + .enable_high = 1, + .init_data = &cn12_power_init_data, +}; + +static struct platform_device cn12_power = { + .name = "reg-fixed-voltage", + .id = 0, + .dev = { + .platform_data = &cn12_power_info, + }, +}; + #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) /* SDHI0 */ +static struct regulator_consumer_supply sdhi0_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + +static struct regulator_init_data sdhi0_power_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(sdhi0_power_consumers), + .consumer_supplies = sdhi0_power_consumers, +}; + +static struct fixed_voltage_config sdhi0_power_info = { + .supply_name = "CN11 SD/MMC Vdd", + .microvolts = 3300000, + .gpio = GPIO_PTB6, + .enable_high = 1, + .init_data = &sdhi0_power_init_data, +}; + +static struct platform_device sdhi0_power = { + .name = "reg-fixed-voltage", + .id = 1, + .dev = { + .platform_data = &sdhi0_power_info, + }, +}; + static void sdhi0_set_pwr(struct platform_device *pdev, int state) { + static int power_gpio = -EINVAL; + + if (power_gpio < 0) { + int ret = gpio_request(GPIO_PTB6, NULL); + if (!ret) { + power_gpio = GPIO_PTB6; + gpio_direction_output(power_gpio, 0); + } + } + + /* + * Toggle the GPIO regardless, whether we managed to grab it above or + * the fixed regulator driver did. + */ gpio_set_value(GPIO_PTB6, state); } @@ -562,13 +648,27 @@ static struct platform_device sdhi0_device = { }, }; -#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) -/* SDHI1 */ -static void sdhi1_set_pwr(struct platform_device *pdev, int state) +static void cn12_set_pwr(struct platform_device *pdev, int state) { + static int power_gpio = -EINVAL; + + if (power_gpio < 0) { + int ret = gpio_request(GPIO_PTB7, NULL); + if (!ret) { + power_gpio = GPIO_PTB7; + gpio_direction_output(power_gpio, 0); + } + } + + /* + * Toggle the GPIO regardless, whether we managed to grab it above or + * the fixed regulator driver did. 
+ */ gpio_set_value(GPIO_PTB7, state); } +#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) +/* SDHI1 */ static int sdhi1_get_cd(struct platform_device *pdev) { return !gpio_get_value(GPIO_PTW7); @@ -579,7 +679,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD | MMC_CAP_NEEDS_POLL, - .set_pwr = sdhi1_set_pwr, + .set_pwr = cn12_set_pwr, .get_cd = sdhi1_get_cd, }; @@ -899,14 +999,9 @@ static struct platform_device vou_device = { #if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE) /* SH_MMCIF */ -static void mmcif_set_pwr(struct platform_device *pdev, int state) -{ - gpio_set_value(GPIO_PTB7, state); -} - static void mmcif_down_pwr(struct platform_device *pdev) { - gpio_set_value(GPIO_PTB7, 0); + cn12_set_pwr(pdev, 0); } static struct resource sh_mmcif_resources[] = { @@ -929,7 +1024,7 @@ static struct resource sh_mmcif_resources[] = { }; static struct sh_mmcif_plat_data sh_mmcif_plat = { - .set_pwr = mmcif_set_pwr, + .set_pwr = cn12_set_pwr, .down_pwr = mmcif_down_pwr, .sup_pclk = 0, /* SH7724: Max Pclk/2 */ .caps = MMC_CAP_4_BIT_DATA | @@ -960,7 +1055,9 @@ static struct platform_device *ecovec_devices[] __initdata = { &ceu0_device, &ceu1_device, &keysc_device, + &cn12_power, #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) + &sdhi0_power, &sdhi0_device, #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) &sdhi1_device, @@ -1258,8 +1355,6 @@ static int __init arch_setup(void) gpio_request(GPIO_FN_SDHI0D2, NULL); gpio_request(GPIO_FN_SDHI0D1, NULL); gpio_request(GPIO_FN_SDHI0D0, NULL); - gpio_request(GPIO_PTB6, NULL); - gpio_direction_output(GPIO_PTB6, 0); #else /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */ gpio_request(GPIO_FN_MSIOF0_TXD, NULL); @@ -1288,8 +1383,6 @@ static int __init arch_setup(void) gpio_request(GPIO_FN_MMC_D0, NULL); gpio_request(GPIO_FN_MMC_CLK, NULL); gpio_request(GPIO_FN_MMC_CMD, NULL); - gpio_request(GPIO_PTB7, NULL); - gpio_direction_output(GPIO_PTB7, 0); cn12_enabled = true; #elif defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) @@ -1301,8 +1394,6 @@ static int __init arch_setup(void) gpio_request(GPIO_FN_SDHI1D2, NULL); gpio_request(GPIO_FN_SDHI1D1, NULL); gpio_request(GPIO_FN_SDHI1D0, NULL); - gpio_request(GPIO_PTB7, NULL); - gpio_direction_output(GPIO_PTB7, 0); /* Card-detect, used on CN12 with SDHI1 */ gpio_request(GPIO_PTW7, NULL); diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 43a179ce9afc..f2a4304fbe23 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -21,6 +21,8 @@ #include <linux/input.h> #include <linux/input/sh_keysc.h> #include <linux/i2c.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/usb/r8a66597.h> #include <linux/videodev2.h> #include <linux/sh_intc.h> @@ -341,6 +343,13 @@ static struct platform_device kfr2r09_camera = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + static struct resource kfr2r09_sh_sdhi0_resources[] = { [0] = { .name = "SDHI0", @@ -523,6 +532,9 @@ static int __init kfr2r09_devices_setup(void) &kfr2r09_sdram_leave_start, &kfr2r09_sdram_leave_end); + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + 
ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* enable SCIF1 serial port for YC401 console support */ gpio_request(GPIO_FN_SCIF1_RXD, NULL); gpio_request(GPIO_FN_SCIF1_TXD, NULL); diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index a8a1ca741c85..8b73194ed2ce 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -17,6 +17,8 @@ #include <linux/mtd/physmap.h> #include <linux/mtd/nand.h> #include <linux/i2c.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smc91x.h> #include <linux/delay.h> #include <linux/clk.h> @@ -386,6 +388,13 @@ static struct platform_device migor_ceu_device = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + static struct resource sdhi_cn9_resources[] = { [0] = { .name = "SDHI", @@ -498,6 +507,10 @@ static int __init migor_devices_setup(void) &migor_sdram_enter_end, &migor_sdram_leave_start, &migor_sdram_leave_end); + + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* Let D11 LED show STATUS0 */ gpio_request(GPIO_FN_STATUS0, NULL); diff --git a/arch/sh/boards/mach-rsk/setup.c b/arch/sh/boards/mach-rsk/setup.c index 895f030070d3..2685ea03b064 100644 --- a/arch/sh/boards/mach-rsk/setup.c +++ b/arch/sh/boards/mach-rsk/setup.c @@ -16,9 +16,17 @@ #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/map.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <asm/machvec.h> #include <asm/io.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static const char *part_probes[] = { "cmdlinepart", NULL }; static struct mtd_partition rsk_partitions[] = { @@ -67,6 +75,8 @@ static struct platform_device *rsk_devices[] __initdata = { static int __init rsk_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(rsk_devices, ARRAY_SIZE(rsk_devices)); } diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c index 27a2314f50ac..c29268bfd34a 100644 --- a/arch/sh/boards/mach-sdk7786/setup.c +++ b/arch/sh/boards/mach-sdk7786/setup.c @@ -11,6 +11,8 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/i2c.h> #include <linux/irq.h> @@ -38,6 +40,12 @@ static struct platform_device heartbeat_device = { .resource = &heartbeat_resource, }; +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -236,6 +244,8 @@ static void __init sdk7786_setup(char **cmdline_p) { pr_info("Renesas Technology Europe SDK7786 support:\n"); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + sdk7786_fpga_init(); sdk7786_nmi_init(); diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 
ffbf5bc7366b..35f6efa3ac0e 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -18,6 +18,8 @@ #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mtd/physmap.h> #include <linux/delay.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smc91x.h> #include <linux/gpio.h> #include <linux/input.h> @@ -454,6 +456,15 @@ static struct platform_device sh7724_usb1_gadget_device = { .resource = sh7724_usb1_gadget_resources, }; +/* Fixed 3.3V regulator to be used by SDHI0, SDHI1 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + static struct resource sdhi0_cn7_resources[] = { [0] = { .name = "SDHI0", @@ -684,6 +695,10 @@ static int __init devices_setup(void) &ms7724se_sdram_enter_end, &ms7724se_sdram_leave_start, &ms7724se_sdram_leave_end); + + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* Reset Release */ fpga_out = __raw_readw(FPGA_OUT); /* bit4: NTSC_PDN, bit5: NTSC_RESET */ diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index 4c171f13b0e8..b22565623142 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c @@ -335,7 +335,7 @@ static int dmae_irq_init(void) for (n = 0; n < NR_DMAE; n++) { int i = request_irq(get_dma_error_irq(n), dma_err, - IRQF_SHARED, dmae_name[n], NULL); + IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]); if (unlikely(i < 0)) { printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); return i; diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 4a5350037c8f..1b6199740e98 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -6,7 +6,6 @@ extern long __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; -extern char _ebss[]; extern char __start_eh_frame[], __stop_eh_frame[]; #endif /* __ASM_SH_SECTIONS_H */ diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 48d14498e774..2a0ca8780f0d 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h @@ -183,18 +183,30 @@ enum { GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0, GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK, GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE, - GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22, - GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20, - GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18, - GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16, - GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14, - GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12, - GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10, - GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8, - GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6, - GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4, - GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2, - GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0, + GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22, + GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20, + GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18, + GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16, + GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14, + GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12, + GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10, + GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8, + GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6, + 
GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4, + GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2, + GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0, + GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22, + GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20, + GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18, + GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16, + GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14, + GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12, + GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10, + GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8, + GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6, + GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4, + GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2, + GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0, GPIO_FN_LCD_M_DISP, }; diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h index 41f9f8b9db73..5340f3bc1863 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7757.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h @@ -283,5 +283,7 @@ enum { SHDMA_SLAVE_RIIC8_RX, SHDMA_SLAVE_RIIC9_TX, SHDMA_SLAVE_RIIC9_RX, + SHDMA_SLAVE_RSPI_TX, + SHDMA_SLAVE_RSPI_RX, }; #endif /* __ASM_SH7757_H__ */ diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c index f25127c46eca..039e4587dd9b 100644 --- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c @@ -758,12 +758,22 @@ enum { DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK, LCD_CLK_MARK, LCD_EXTCLK_MARK, LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK, - LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK, - LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK, - LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, - LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, - LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, - LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, + LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK, + LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK, + LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK, + LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK, + LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK, + LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK, + LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK, + LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK, + LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK, + LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK, + LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK, + LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK, + LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK, + LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK, + LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK, + LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK, LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK, LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK, LCD_M_DISP_MARK, @@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PF1_DATA, PF1MD_000), PINMUX_DATA(BACK_MARK, PF1MD_001), + PINMUX_DATA(SSL10_MARK, PF1MD_011), PINMUX_DATA(TIOC4B_MARK, PF1MD_100), PINMUX_DATA(DACK0_MARK, PF1MD_101), @@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PG27_DATA, PG27MD_00), PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10), PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11), 
+ PINMUX_DATA(LCD_DE_MARK, PG27MD_11), PINMUX_DATA(PG26_DATA, PG26MD_00), PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10), + PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10), PINMUX_DATA(PG25_DATA, PG25MD_00), PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10), + PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10), PINMUX_DATA(PG24_DATA, PG24MD_00), PINMUX_DATA(LCD_CLK_MARK, PG24MD_10), PINMUX_DATA(PG23_DATA, PG23MD_000), - PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010), + PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011), PINMUX_DATA(TXD5_MARK, PG23MD_100), PINMUX_DATA(PG22_DATA, PG22MD_000), - PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010), + PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011), PINMUX_DATA(RXD5_MARK, PG22MD_100), PINMUX_DATA(PG21_DATA, PG21MD_000), PINMUX_DATA(DV_DATA7_MARK, PG21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011), PINMUX_DATA(TXD4_MARK, PG21MD_100), PINMUX_DATA(PG20_DATA, PG20MD_000), PINMUX_DATA(DV_DATA6_MARK, PG20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA20_PG20_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011), PINMUX_DATA(RXD4_MARK, PG20MD_100), PINMUX_DATA(PG19_DATA, PG19MD_000), PINMUX_DATA(DV_DATA5_MARK, PG19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010), + PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010), PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011), PINMUX_DATA(SCK5_MARK, PG19MD_100), PINMUX_DATA(PG18_DATA, PG18MD_000), PINMUX_DATA(DV_DATA4_MARK, PG18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010), + PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010), PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011), PINMUX_DATA(SCK4_MARK, PG18MD_100), @@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = { // we're going with 2 bits PINMUX_DATA(PG17_DATA, PG17MD_00), PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01), - PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10), + PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10), // TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description // we're going with 2 bits PINMUX_DATA(PG16_DATA, PG16MD_00), PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01), - PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10), + PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10), PINMUX_DATA(PG15_DATA, PG15MD_00), PINMUX_DATA(D31_MARK, PG15MD_01), - PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10), + PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10), PINMUX_DATA(PINT7_PG_MARK, PG15MD_11), PINMUX_DATA(PG14_DATA, PG14MD_00), PINMUX_DATA(D30_MARK, PG14MD_01), - PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10), + PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10), PINMUX_DATA(PINT6_PG_MARK, PG14MD_11), PINMUX_DATA(PG13_DATA, PG13MD_00), PINMUX_DATA(D29_MARK, PG13MD_01), - PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10), + PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10), PINMUX_DATA(PINT5_PG_MARK, PG13MD_11), PINMUX_DATA(PG12_DATA, PG12MD_00), PINMUX_DATA(D28_MARK, PG12MD_01), - PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10), + PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10), PINMUX_DATA(PINT4_PG_MARK, PG12MD_11), PINMUX_DATA(PG11_DATA, PG11MD_000), PINMUX_DATA(D27_MARK, PG11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010), + PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010), PINMUX_DATA(PINT3_PG_MARK, PG11MD_011), PINMUX_DATA(TIOC3D_MARK, PG11MD_100), PINMUX_DATA(PG10_DATA, PG10MD_000), PINMUX_DATA(D26_MARK, PG10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010), + PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010), PINMUX_DATA(PINT2_PG_MARK, PG10MD_011), 
PINMUX_DATA(TIOC3C_MARK, PG10MD_100), PINMUX_DATA(PG9_DATA, PG9MD_000), PINMUX_DATA(D25_MARK, PG9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010), + PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010), PINMUX_DATA(PINT1_PG_MARK, PG9MD_011), PINMUX_DATA(TIOC3B_MARK, PG9MD_100), PINMUX_DATA(PG8_DATA, PG8MD_000), PINMUX_DATA(D24_MARK, PG8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010), + PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010), PINMUX_DATA(PINT0_PG_MARK, PG8MD_011), PINMUX_DATA(TIOC3A_MARK, PG8MD_100), PINMUX_DATA(PG7_DATA, PG7MD_000), PINMUX_DATA(D23_MARK, PG7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010), + PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010), PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011), PINMUX_DATA(TIOC2B_MARK, PG7MD_100), PINMUX_DATA(PG6_DATA, PG6MD_000), PINMUX_DATA(D22_MARK, PG6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010), + PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010), PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011), PINMUX_DATA(TIOC2A_MARK, PG6MD_100), PINMUX_DATA(PG5_DATA, PG5MD_000), PINMUX_DATA(D21_MARK, PG5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010), + PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010), PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011), PINMUX_DATA(TIOC1B_MARK, PG5MD_100), PINMUX_DATA(PG4_DATA, PG4MD_000), PINMUX_DATA(D20_MARK, PG4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010), + PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010), PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011), PINMUX_DATA(TIOC1A_MARK, PG4MD_100), PINMUX_DATA(PG3_DATA, PG3MD_000), PINMUX_DATA(D19_MARK, PG3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010), + PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010), PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011), PINMUX_DATA(TIOC0D_MARK, PG3MD_100), PINMUX_DATA(PG2_DATA, PG2MD_000), PINMUX_DATA(D18_MARK, PG2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010), + PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010), PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011), PINMUX_DATA(TIOC0C_MARK, PG2MD_100), PINMUX_DATA(PG1_DATA, PG1MD_000), PINMUX_DATA(D17_MARK, PG1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010), + PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010), PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011), PINMUX_DATA(TIOC0B_MARK, PG1MD_100), PINMUX_DATA(PG0_DATA, PG0MD_000), PINMUX_DATA(D16_MARK, PG0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010), + PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010), PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011), PINMUX_DATA(TIOC0A_MARK, PG0MD_100), @@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ23_DATA, PJ23MD_000), PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001), - PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010), + PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011), PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100), PINMUX_DATA(CTX1_MARK, PJ23MD_101), PINMUX_DATA(PJ22_DATA, PJ22MD_000), PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001), - PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010), + PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011), PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100), PINMUX_DATA(CRX1_MARK, PJ22MD_101), @@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ21_DATA, PJ21MD_000), PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010), + PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011), PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100), PINMUX_DATA(CTX2_MARK, PJ21MD_101), PINMUX_DATA(PJ20_DATA, PJ20MD_000), PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010), + PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010), PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011), 
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100), PINMUX_DATA(CRX2_MARK, PJ20MD_101), @@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ19_DATA, PJ19MD_000), PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010), + PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010), PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011), PINMUX_DATA(TIOC0D_MARK, PJ19MD_100), PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101), @@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ18_DATA, PJ18MD_000), PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010), + PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010), PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011), PINMUX_DATA(TIOC0C_MARK, PJ18MD_100), PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101), PINMUX_DATA(PJ17_DATA, PJ17MD_000), PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001), - PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010), + PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010), PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011), PINMUX_DATA(TIOC0B_MARK, PJ17MD_100), PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101), PINMUX_DATA(PJ16_DATA, PJ16MD_000), PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001), - PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010), + PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010), PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011), PINMUX_DATA(TIOC0A_MARK, PJ16MD_100), PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101), PINMUX_DATA(PJ15_DATA, PJ15MD_000), PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001), - PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010), + PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010), PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011), PINMUX_DATA(PWM2H_MARK, PJ15MD_100), PINMUX_DATA(TXD7_MARK, PJ15MD_101), PINMUX_DATA(PJ14_DATA, PJ14MD_000), PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001), - PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010), + PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010), PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011), PINMUX_DATA(PWM2G_MARK, PJ14MD_100), PINMUX_DATA(TXD6_MARK, PJ14MD_101), PINMUX_DATA(PJ13_DATA, PJ13MD_000), PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001), - PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010), + PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010), PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011), PINMUX_DATA(PWM2F_MARK, PJ13MD_100), PINMUX_DATA(TXD5_MARK, PJ13MD_101), PINMUX_DATA(PJ12_DATA, PJ12MD_000), PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001), - PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010), + PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010), PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011), PINMUX_DATA(PWM2E_MARK, PJ12MD_100), PINMUX_DATA(SCK7_MARK, PJ12MD_101), PINMUX_DATA(PJ11_DATA, PJ11MD_000), PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010), + PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010), PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011), PINMUX_DATA(PWM2D_MARK, PJ11MD_100), PINMUX_DATA(SCK6_MARK, PJ11MD_101), PINMUX_DATA(PJ10_DATA, PJ10MD_000), PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010), + PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010), PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011), PINMUX_DATA(PWM2C_MARK, PJ10MD_100), PINMUX_DATA(SCK5_MARK, PJ10MD_101), PINMUX_DATA(PJ9_DATA, PJ9MD_000), PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010), + PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010), PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011), PINMUX_DATA(PWM2B_MARK, PJ9MD_100), PINMUX_DATA(RTS5_MARK, PJ9MD_101), PINMUX_DATA(PJ8_DATA, PJ8MD_000), PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010), + PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010), PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011), PINMUX_DATA(PWM2A_MARK, 
PJ8MD_100), PINMUX_DATA(CTS5_MARK, PJ8MD_101), PINMUX_DATA(PJ7_DATA, PJ7MD_000), PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010), + PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010), PINMUX_DATA(SD_D2_MARK, PJ7MD_011), PINMUX_DATA(PWM1H_MARK, PJ7MD_100), PINMUX_DATA(PJ6_DATA, PJ6MD_000), PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010), + PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010), PINMUX_DATA(SD_D3_MARK, PJ6MD_011), PINMUX_DATA(PWM1G_MARK, PJ6MD_100), PINMUX_DATA(PJ5_DATA, PJ5MD_000), PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010), + PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010), PINMUX_DATA(SD_CMD_MARK, PJ5MD_011), PINMUX_DATA(PWM1F_MARK, PJ5MD_100), PINMUX_DATA(PJ4_DATA, PJ4MD_000), PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010), + PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010), PINMUX_DATA(SD_CLK_MARK, PJ4MD_011), PINMUX_DATA(PWM1E_MARK, PJ4MD_100), PINMUX_DATA(PJ3_DATA, PJ3MD_000), PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010), + PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010), PINMUX_DATA(SD_D0_MARK, PJ3MD_011), PINMUX_DATA(PWM1D_MARK, PJ3MD_100), PINMUX_DATA(PJ2_DATA, PJ2MD_000), PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010), + PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010), PINMUX_DATA(SD_D1_MARK, PJ2MD_011), PINMUX_DATA(PWM1C_MARK, PJ2MD_100), PINMUX_DATA(PJ1_DATA, PJ1MD_000), PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010), + PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010), PINMUX_DATA(SD_WP_MARK, PJ1MD_011), PINMUX_DATA(PWM1B_MARK, PJ1MD_100), PINMUX_DATA(PJ0_DATA, PJ0MD_000), PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010), + PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010), PINMUX_DATA(SD_CD_MARK, PJ0MD_011), PINMUX_DATA(PWM1A_MARK, PJ0MD_100), }; @@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = { PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK), + 
PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK), + + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), }; diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index c87e78f73234..5f30f805d2f2 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -334,8 +334,8 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]), - CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]), - CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]), + CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]), + CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]), CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]), CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]), diff --git 
a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 65786c7f5ded..6a868b091c2d 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -12,6 +12,7 @@ #include <linux/platform_device.h> #include <linux/serial.h> #include <linux/serial_sci.h> +#include <linux/sh_dma.h> #include <linux/sh_timer.h> #include <linux/sh_intc.h> #include <linux/uio_driver.h> diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c index a7708425afa9..4a2f357f4df8 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c @@ -216,6 +216,20 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = { TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x42, }, + { + .slave_id = SHDMA_SLAVE_RSPI_TX, + .addr = 0xfe480004, + .chcr = SM_INC | 0x800 | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc1, + }, + { + .slave_id = SHDMA_SLAVE_RSPI_RX, + .addr = 0xfe480004, + .chcr = DM_INC | 0x800 | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc2, + }, }; static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 7b57bf1dc855..ebe7a7d97215 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p) data_resource.start = virt_to_phys(_etext); data_resource.end = virt_to_phys(_edata)-1; bss_resource.start = virt_to_phys(__bss_start); - bss_resource.end = virt_to_phys(_ebss)-1; + bss_resource.end = virt_to_phys(__bss_stop)-1; #ifdef CONFIG_CMDLINE_OVERWRITE strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 3896f26efa4a..2a0a596ebf67 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(_ebss); EXPORT_SYMBOL(empty_zero_page); #define DECLARE_EXPORT(name) \ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index c98905f71e28..db88cbf9eafd 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -78,7 +78,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; BSS_SECTION(0, PAGE_SIZE, 4) - _ebss = .; /* uClinux MTD sucks */ _end = . ; STABS_DEBUG diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 84a57761f17e..60164e65d665 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -39,7 +39,7 @@ * * Make sure the stack pointer contains a valid address. Valid * addresses for kernel stacks are anywhere after the bss - * (after _ebss) and anywhere in init_thread_union (init_stack). + * (after __bss_stop) and anywhere in init_thread_union (init_stack). */ #define STACK_CHECK() \ mov #(THREAD_SIZE >> 10), r0; \ @@ -60,7 +60,7 @@ cmp/hi r2, r1; \ bf stack_panic; \ \ - /* If sp > _ebss then we're OK. */ \ + /* If sp > __bss_stop then we're OK. */ \ mov.l .L_ebss, r1; \ cmp/hi r1, r15; \ bt 1f; \ @@ -70,7 +70,7 @@ cmp/hs r1, r15; \ bf stack_panic; \ \ - /* If sp > init_stack && sp < _ebss, not OK. */ \ + /* If sp > init_stack && sp < __bss_stop, not OK. 
*/ \ add r0, r1; \ cmp/hs r1, r15; \ bt stack_panic; \ @@ -292,8 +292,6 @@ stack_panic: nop .align 2 -.L_ebss: - .long _ebss .L_init_thread_union: .long init_thread_union .Lpanic: diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 1fc25d85e515..3bdc1ad9a341 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -58,11 +58,15 @@ static void show_pte(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; - if (mm) + if (mm) { pgd = mm->pgd; - else + } else { pgd = get_TTB(); + if (unlikely(!pgd)) + pgd = swapper_pg_dir; + } + printk(KERN_ALERT "pgd = %p\n", pgd); pgd += pgd_index(addr); printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr, diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 0dc1f5786081..11c6c9603e71 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -502,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 6026fdd1b2ed..d58edf5fefdb 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, diff --git a/arch/um/defconfig b/arch/um/defconfig index fec0d5d27460..08107a795062 100644 --- a/arch/um/defconfig +++ b/arch/um/defconfig @@ -163,7 +163,7 @@ CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y # CONFIG_CFS_BANDWIDTH is not set # CONFIG_RT_GROUP_SCHED is not set -CONFIG_BLK_CGROUP=m +CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 45e248c2f43c..87eebfe03c61 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c @@ -150,9 +150,11 @@ void chan_enable_winch(struct chan *chan, struct tty_struct *tty) static void line_timer_cb(struct work_struct *work) { struct line *line = container_of(work, struct line, task.work); + struct 
tty_struct *tty = tty_port_tty_get(&line->port); if (!line->throttled) - chan_interrupt(line, line->tty, line->driver->read_irq); + chan_interrupt(line, tty, line->driver->read_irq); + tty_kref_put(tty); } int enable_chan(struct line *line) diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index ac9d25c8dc01..bbaf2c59830a 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -19,9 +19,11 @@ static irqreturn_t line_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; + struct tty_struct *tty = tty_port_tty_get(&line->port); if (line) - chan_interrupt(line, line->tty, irq); + chan_interrupt(line, tty, irq); + tty_kref_put(tty); return IRQ_HANDLED; } @@ -219,92 +221,6 @@ void line_set_termios(struct tty_struct *tty, struct ktermios * old) /* nothing */ } -static const struct { - int cmd; - char *level; - char *name; -} tty_ioctls[] = { - /* don't print these, they flood the log ... */ - { TCGETS, NULL, "TCGETS" }, - { TCSETS, NULL, "TCSETS" }, - { TCSETSW, NULL, "TCSETSW" }, - { TCFLSH, NULL, "TCFLSH" }, - { TCSBRK, NULL, "TCSBRK" }, - - /* general tty stuff */ - { TCSETSF, KERN_DEBUG, "TCSETSF" }, - { TCGETA, KERN_DEBUG, "TCGETA" }, - { TIOCMGET, KERN_DEBUG, "TIOCMGET" }, - { TCSBRKP, KERN_DEBUG, "TCSBRKP" }, - { TIOCMSET, KERN_DEBUG, "TIOCMSET" }, - - /* linux-specific ones */ - { TIOCLINUX, KERN_INFO, "TIOCLINUX" }, - { KDGKBMODE, KERN_INFO, "KDGKBMODE" }, - { KDGKBTYPE, KERN_INFO, "KDGKBTYPE" }, - { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" }, -}; - -int line_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg) -{ - int ret; - int i; - - ret = 0; - switch(cmd) { -#ifdef TIOCGETP - case TIOCGETP: - case TIOCSETP: - case TIOCSETN: -#endif -#ifdef TIOCGETC - case TIOCGETC: - case TIOCSETC: -#endif -#ifdef TIOCGLTC - case TIOCGLTC: - case TIOCSLTC: -#endif - /* Note: these are out of date as we now have TCGETS2 etc but this - whole lot should probably go away */ - case TCGETS: - case TCSETSF: - case TCSETSW: - case TCSETS: - case TCGETA: - case TCSETAF: - case TCSETAW: - case TCSETA: - case TCXONC: - case TCFLSH: - case TIOCOUTQ: - case TIOCINQ: - case TIOCGLCKTRMIOS: - case TIOCSLCKTRMIOS: - case TIOCPKT: - case TIOCGSOFTCAR: - case TIOCSSOFTCAR: - return -ENOIOCTLCMD; -#if 0 - case TCwhatever: - /* do something */ - break; -#endif - default: - for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++) - if (cmd == tty_ioctls[i].cmd) - break; - if (i == ARRAY_SIZE(tty_ioctls)) { - printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n", - __func__, tty->name, cmd); - } - ret = -ENOIOCTLCMD; - break; - } - return ret; -} - void line_throttle(struct tty_struct *tty) { struct line *line = tty->driver_data; @@ -333,7 +249,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; - struct tty_struct *tty = line->tty; + struct tty_struct *tty; int err; /* @@ -352,10 +268,13 @@ static irqreturn_t line_write_interrupt(int irq, void *data) } spin_unlock(&line->lock); + tty = tty_port_tty_get(&line->port); if (tty == NULL) return IRQ_NONE; tty_wakeup(tty); + tty_kref_put(tty); + return IRQ_HANDLED; } @@ -377,43 +296,14 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) return err; } -/* - * Normally, a driver like this can rely mostly on the tty layer - * locking, particularly when it comes to the driver structure. - * However, in this case, mconsole requests can come in "from the - * side", and race with opens and closes. 
- * - * mconsole config requests will want to be sure the device isn't in - * use, and get_config, open, and close will want a stable - * configuration. The checking and modification of the configuration - * is done under a spinlock. Checking whether the device is in use is - * line->tty->count > 1, also under the spinlock. - * - * line->count serves to decide whether the device should be enabled or - * disabled on the host. If it's equal to 0, then we are doing the - * first open or last close. Otherwise, open and close just return. - */ - -int line_open(struct line *lines, struct tty_struct *tty) +static int line_activate(struct tty_port *port, struct tty_struct *tty) { - struct line *line = &lines[tty->index]; - int err = -ENODEV; - - mutex_lock(&line->count_lock); - if (!line->valid) - goto out_unlock; - - err = 0; - if (line->count++) - goto out_unlock; - - BUG_ON(tty->driver_data); - tty->driver_data = line; - line->tty = tty; + int ret; + struct line *line = tty->driver_data; - err = enable_chan(line); - if (err) /* line_close() will be called by our caller */ - goto out_unlock; + ret = enable_chan(line); + if (ret) + return ret; if (!line->sigio) { chan_enable_winch(line->chan_out, tty); @@ -421,44 +311,60 @@ int line_open(struct line *lines, struct tty_struct *tty) } chan_window_size(line, &tty->winsize.ws_row, - &tty->winsize.ws_col); -out_unlock: - mutex_unlock(&line->count_lock); - return err; + &tty->winsize.ws_col); + + return 0; } -static void unregister_winch(struct tty_struct *tty); +static const struct tty_port_operations line_port_ops = { + .activate = line_activate, +}; -void line_close(struct tty_struct *tty, struct file * filp) +int line_open(struct tty_struct *tty, struct file *filp) { struct line *line = tty->driver_data; - /* - * If line_open fails (and tty->driver_data is never set), - * tty_open will call line_close. So just return in this case. - */ - if (line == NULL) - return; + return tty_port_open(&line->port, tty, filp); +} - /* We ignore the error anyway! 
*/ - flush_buffer(line); +int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line) +{ + int ret; - mutex_lock(&line->count_lock); - BUG_ON(!line->valid); + ret = tty_standard_install(driver, tty); + if (ret) + return ret; - if (--line->count) - goto out_unlock; + tty->driver_data = line; - line->tty = NULL; - tty->driver_data = NULL; + return 0; +} + +static void unregister_winch(struct tty_struct *tty); + +void line_cleanup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; if (line->sigio) { unregister_winch(tty); line->sigio = 0; } +} + +void line_close(struct tty_struct *tty, struct file * filp) +{ + struct line *line = tty->driver_data; -out_unlock: - mutex_unlock(&line->count_lock); + tty_port_close(&line->port, tty, filp); +} + +void line_hangup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; + + tty_port_hangup(&line->port); } void close_lines(struct line *lines, int nlines) @@ -476,9 +382,7 @@ int setup_one_line(struct line *lines, int n, char *init, struct tty_driver *driver = line->driver->driver; int err = -EINVAL; - mutex_lock(&line->count_lock); - - if (line->count) { + if (line->port.count) { *error_out = "Device is already open"; goto out; } @@ -519,7 +423,6 @@ int setup_one_line(struct line *lines, int n, char *init, } } out: - mutex_unlock(&line->count_lock); return err; } @@ -607,13 +510,17 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str, line = &lines[dev]; - mutex_lock(&line->count_lock); if (!line->valid) CONFIG_CHUNK(str, size, n, "none", 1); - else if (line->tty == NULL) - CONFIG_CHUNK(str, size, n, line->init_str, 1); - else n = chan_config_string(line, str, size, error_out); - mutex_unlock(&line->count_lock); + else { + struct tty_struct *tty = tty_port_tty_get(&line->port); + if (tty == NULL) { + CONFIG_CHUNK(str, size, n, line->init_str, 1); + } else { + n = chan_config_string(line, str, size, error_out); + tty_kref_put(tty); + } + } return n; } @@ -663,8 +570,9 @@ int register_lines(struct line_driver *line_driver, driver->init_termios = tty_std_termios; for (i = 0; i < nlines; i++) { + tty_port_init(&lines[i].port); + lines[i].port.ops = &line_port_ops; spin_lock_init(&lines[i].lock); - mutex_init(&lines[i].count_lock); lines[i].driver = line_driver; INIT_LIST_HEAD(&lines[i].chan_list); } diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h index 0a1834719dba..bae95611e7ab 100644 --- a/arch/um/drivers/line.h +++ b/arch/um/drivers/line.h @@ -32,9 +32,7 @@ struct line_driver { }; struct line { - struct tty_struct *tty; - struct mutex count_lock; - unsigned long count; + struct tty_port port; int valid; char *init_str; @@ -59,7 +57,11 @@ struct line { }; extern void line_close(struct tty_struct *tty, struct file * filp); -extern int line_open(struct line *lines, struct tty_struct *tty); +extern int line_open(struct tty_struct *tty, struct file *filp); +extern int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line); +extern void line_cleanup(struct tty_struct *tty); +extern void line_hangup(struct tty_struct *tty); extern int line_setup(char **conf, unsigned nlines, char **def, char *init, char *name); extern int line_write(struct tty_struct *tty, const unsigned char *buf, @@ -70,8 +72,6 @@ extern int line_chars_in_buffer(struct tty_struct *tty); extern void line_flush_buffer(struct tty_struct *tty); extern void line_flush_chars(struct tty_struct *tty); extern int line_write_room(struct tty_struct *tty); -extern int 
line_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg); extern void line_throttle(struct tty_struct *tty); extern void line_unthrottle(struct tty_struct *tty); diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c index e09801a1327b..7e86f0070123 100644 --- a/arch/um/drivers/ssl.c +++ b/arch/um/drivers/ssl.c @@ -87,40 +87,13 @@ static int ssl_remove(int n, char **error_out) error_out); } -static int ssl_open(struct tty_struct *tty, struct file *filp) -{ - int err = line_open(serial_lines, tty); - - if (err) - printk(KERN_ERR "Failed to open serial line %d, err = %d\n", - tty->index, err); - - return err; -} - -#if 0 -static void ssl_flush_buffer(struct tty_struct *tty) -{ - return; -} - -static void ssl_stop(struct tty_struct *tty) -{ - printk(KERN_ERR "Someone should implement ssl_stop\n"); -} - -static void ssl_start(struct tty_struct *tty) -{ - printk(KERN_ERR "Someone should implement ssl_start\n"); -} - -void ssl_hangup(struct tty_struct *tty) +static int ssl_install(struct tty_driver *driver, struct tty_struct *tty) { + return line_install(driver, tty, &serial_lines[tty->index]); } -#endif static const struct tty_operations ssl_ops = { - .open = ssl_open, + .open = line_open, .close = line_close, .write = line_write, .put_char = line_put_char, @@ -129,14 +102,11 @@ static const struct tty_operations ssl_ops = { .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, - .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, -#if 0 - .stop = ssl_stop, - .start = ssl_start, - .hangup = ssl_hangup, -#endif + .install = ssl_install, + .cleanup = line_cleanup, + .hangup = line_hangup, }; /* Changed by ssl_init and referenced by ssl_exit, which are both serialized diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c index 7663541c372e..929b99a261f3 100644 --- a/arch/um/drivers/stdio_console.c +++ b/arch/um/drivers/stdio_console.c @@ -89,21 +89,17 @@ static int con_remove(int n, char **error_out) return line_remove(vts, ARRAY_SIZE(vts), n, error_out); } -static int con_open(struct tty_struct *tty, struct file *filp) -{ - int err = line_open(vts, tty); - if (err) - printk(KERN_ERR "Failed to open console %d, err = %d\n", - tty->index, err); - - return err; -} - /* Set in an initcall, checked in an exitcall */ static int con_init_done = 0; +static int con_install(struct tty_driver *driver, struct tty_struct *tty) +{ + return line_install(driver, tty, &vts[tty->index]); +} + static const struct tty_operations console_ops = { - .open = con_open, + .open = line_open, + .install = con_install, .close = line_close, .write = line_write, .put_char = line_put_char, @@ -112,9 +108,10 @@ static const struct tty_operations console_ops = { .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, - .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, + .cleanup = line_cleanup, + .hangup = line_hangup, }; static void uml_console_write(struct console *console, const char *string, diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 20505cafa299..0643e5bc9f41 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -514,7 +514,7 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) goto out; } - fd = os_open_file(ubd_dev->file, global_openflags, 0); + fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0); if (fd < 0) return fd; diff --git 
a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h index e786a6a3ec5e..442f1d025dc2 100644 --- a/arch/um/include/asm/ptrace-generic.h +++ b/arch/um/include/asm/ptrace-generic.h @@ -37,6 +37,8 @@ extern int putreg(struct task_struct *child, int regno, unsigned long value); extern int arch_copy_tls(struct task_struct *new); extern void clear_flushed_tls(struct task_struct *task); +extern void syscall_trace_enter(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); #endif diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h index 896e16602176..86daa5461815 100644 --- a/arch/um/include/shared/as-layout.h +++ b/arch/um/include/shared/as-layout.h @@ -60,7 +60,8 @@ extern unsigned long host_task_size; extern int linux_main(int argc, char **argv); -extern void (*sig_info[])(int, struct uml_pt_regs *); +struct siginfo; +extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *); #endif diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h index c6c784df2673..2b6d703925b5 100644 --- a/arch/um/include/shared/irq_user.h +++ b/arch/um/include/shared/irq_user.h @@ -20,7 +20,8 @@ struct irq_fd { enum { IRQ_READ, IRQ_WRITE }; -extern void sigio_handler(int sig, struct uml_pt_regs *regs); +struct siginfo; +extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern void free_irq_by_fd(int fd); extern void reactivate_fd(int fd, int irqnum); extern void deactivate_fd(int fd, int irqnum); diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h index 00965d06d2ca..af6b6dc868ba 100644 --- a/arch/um/include/shared/kern_util.h +++ b/arch/um/include/shared/kern_util.h @@ -9,6 +9,8 @@ #include "sysdep/ptrace.h" #include "sysdep/faultinfo.h" +struct siginfo; + extern int uml_exitcode; extern int ncpus; @@ -22,7 +24,7 @@ extern void free_stack(unsigned long stack, int order); extern int do_signal(void); extern void interrupt_end(void); -extern void relay_signal(int sig, struct uml_pt_regs *regs); +extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs); extern unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, struct uml_pt_regs *regs); @@ -33,9 +35,8 @@ extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs); extern int smp_sigio_handler(void); extern void initial_thread_cb(void (*proc)(void *), void *arg); extern int is_syscall(unsigned long addr); -extern void timer_handler(int sig, struct uml_pt_regs *regs); -extern void timer_handler(int sig, struct uml_pt_regs *regs); +extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern int start_uml(void); extern void paging_init(void); @@ -59,9 +60,9 @@ extern unsigned long from_irq_stack(int nested); extern void syscall_trace(struct uml_pt_regs *regs, int entryexit); extern int singlestepping(void *t); -extern void segv_handler(int sig, struct uml_pt_regs *regs); -extern void bus_handler(int sig, struct uml_pt_regs *regs); -extern void winch(int sig, struct uml_pt_regs *regs); +extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); +extern void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs); +extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern void fatal_sigsegv(void) __attribute__ ((noreturn)); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 00506c3d5d6e..9883026f0730 100644 --- 
a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -30,7 +30,7 @@ static struct irq_fd **last_irq_ptr = &active_fds; extern void free_irqs(void); -void sigio_handler(int sig, struct uml_pt_regs *regs) +void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { struct irq_fd *irq_fd; int n; diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index ccb9a9d283f1..57fc7028714a 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -151,12 +151,10 @@ void new_thread_handler(void) * 0 if it just exits */ n = run_kernel_thread(fn, arg, &current->thread.exec_buf); - if (n == 1) { - /* Handle any immediate reschedules or signals */ - interrupt_end(); + if (n == 1) userspace(&current->thread.regs.regs); - } - else do_exit(0); + else + do_exit(0); } /* Called magically, see new_thread_handler above */ @@ -175,9 +173,6 @@ void fork_handler(void) current->thread.prev_sched = NULL; - /* Handle any immediate reschedules or signals */ - interrupt_end(); - userspace(&current->thread.regs.regs); } @@ -193,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, if (current->thread.forking) { memcpy(&p->thread.regs.regs, &regs->regs, sizeof(p->thread.regs.regs)); - UPT_SET_SYSCALL_RETURN(&p->thread.regs.regs, 0); + PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); if (sp != 0) REGS_SP(p->thread.regs.regs.gp) = sp; diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index 06b190390505..694d551c8899 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -3,11 +3,12 @@ * Licensed under the GPL */ -#include "linux/audit.h" -#include "linux/ptrace.h" -#include "linux/sched.h" -#include "asm/uaccess.h" -#include "skas_ptrace.h" +#include <linux/audit.h> +#include <linux/ptrace.h> +#include <linux/sched.h> +#include <linux/tracehook.h> +#include <asm/uaccess.h> +#include <skas_ptrace.h> @@ -162,48 +163,36 @@ static void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs, * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check */ -void syscall_trace(struct uml_pt_regs *regs, int entryexit) +void syscall_trace_enter(struct pt_regs *regs) { - int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit; - int tracesysgood; - - if (!entryexit) - audit_syscall_entry(HOST_AUDIT_ARCH, - UPT_SYSCALL_NR(regs), - UPT_SYSCALL_ARG1(regs), - UPT_SYSCALL_ARG2(regs), - UPT_SYSCALL_ARG3(regs), - UPT_SYSCALL_ARG4(regs)); - else - audit_syscall_exit(regs); - - /* Fake a debug trap */ - if (is_singlestep) - send_sigtrap(current, regs, 0); + audit_syscall_entry(HOST_AUDIT_ARCH, + UPT_SYSCALL_NR(&regs->regs), + UPT_SYSCALL_ARG1(&regs->regs), + UPT_SYSCALL_ARG2(&regs->regs), + UPT_SYSCALL_ARG3(&regs->regs), + UPT_SYSCALL_ARG4(&regs->regs)); if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - if (!(current->ptrace & PT_PTRACED)) - return; + tracehook_report_syscall_entry(regs); +} - /* - * the 0x80 provides a way for the tracing parent to distinguish - * between a syscall stop and SIGTRAP delivery - */ - tracesysgood = (current->ptrace & PT_TRACESYSGOOD); - ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0)); +void syscall_trace_leave(struct pt_regs *regs) +{ + int ptraced = current->ptrace; - if (entryexit) /* force do_signal() --> is_syscall() */ - set_thread_flag(TIF_SIGPENDING); + audit_syscall_exit(regs); - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. 
-brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + /* Fake a debug trap */ + if (ptraced & PT_DTRACE) + send_sigtrap(current, &regs->regs, 0); + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + tracehook_report_syscall_exit(regs, 0); + /* force do_signal() --> is_syscall() */ + if (ptraced & PT_PTRACED) + set_thread_flag(TIF_SIGPENDING); } diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c index 05fbeb480e0b..86368a025a96 100644 --- a/arch/um/kernel/skas/syscall.c +++ b/arch/um/kernel/skas/syscall.c @@ -18,7 +18,7 @@ void handle_syscall(struct uml_pt_regs *r) { long result; int syscall; - syscall_trace(r, 0); + syscall_trace_enter(regs); /* * This should go in the declaration of syscall, but when I do that, @@ -34,7 +34,7 @@ void handle_syscall(struct uml_pt_regs *r) result = -ENOSYS; else result = EXECUTE_SYSCALL(syscall, regs); - UPT_SET_SYSCALL_RETURN(r, result); + PT_REGS_SET_SYSCALL_RETURN(regs, result); - syscall_trace(r, 1); + syscall_trace_leave(regs); } diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index d1a23fb3190d..5f76d4ba151c 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -13,7 +13,7 @@ #include "kern_util.h" #include "os.h" -void timer_handler(int sig, struct uml_pt_regs *regs) +void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { unsigned long flags; diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 3be60765c0e2..0353b98ae35a 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -172,7 +172,7 @@ void fatal_sigsegv(void) os_dump_core(); } -void segv_handler(int sig, struct uml_pt_regs *regs) +void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { struct faultinfo * fi = UPT_FAULTINFO(regs); @@ -258,8 +258,11 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, return 0; } -void relay_signal(int sig, struct uml_pt_regs *regs) +void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs) { + struct faultinfo *fi; + struct siginfo clean_si; + if (!UPT_IS_USER(regs)) { if (sig == SIGBUS) printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " @@ -269,18 +272,40 @@ void relay_signal(int sig, struct uml_pt_regs *regs) arch_examine_signal(sig, regs); - current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); - force_sig(sig, current); + memset(&clean_si, 0, sizeof(clean_si)); + clean_si.si_signo = si->si_signo; + clean_si.si_errno = si->si_errno; + clean_si.si_code = si->si_code; + switch (sig) { + case SIGILL: + case SIGFPE: + case SIGSEGV: + case SIGBUS: + case SIGTRAP: + fi = UPT_FAULTINFO(regs); + clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi); + current->thread.arch.faultinfo = *fi; +#ifdef __ARCH_SI_TRAPNO + clean_si.si_trapno = si->si_trapno; +#endif + break; + default: + printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n", + sig, si->si_code); + } + + force_sig_info(sig, &clean_si, current); } -void bus_handler(int sig, struct uml_pt_regs *regs) +void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs) { if (current->thread.fault_catcher != NULL) UML_LONGJMP(current->thread.fault_catcher, 1); - else relay_signal(sig, regs); + else + relay_signal(sig, si, regs); } -void winch(int sig, struct uml_pt_regs *regs) +void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { do_IRQ(WINCH_IRQ, regs); } diff --git a/arch/um/os-Linux/internal.h b/arch/um/os-Linux/internal.h index 
2c3c3ecd8c01..0dc2c9f135f6 100644 --- a/arch/um/os-Linux/internal.h +++ b/arch/um/os-Linux/internal.h @@ -1 +1 @@ -void alarm_handler(int, mcontext_t *); +void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc); diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 2d22f1fcd8e2..6366ce904b9b 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -13,8 +13,9 @@ #include "kern_util.h" #include "os.h" #include "sysdep/mcontext.h" +#include "internal.h" -void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { +void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, [SIGFPE] = relay_signal, [SIGILL] = relay_signal, @@ -24,7 +25,7 @@ void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { [SIGIO] = sigio_handler, [SIGVTALRM] = timer_handler }; -static void sig_handler_common(int sig, mcontext_t *mc) +static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc) { struct uml_pt_regs r; int save_errno = errno; @@ -40,7 +41,7 @@ static void sig_handler_common(int sig, mcontext_t *mc) if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM)) unblock_signals(); - (*sig_info[sig])(sig, &r); + (*sig_info[sig])(sig, si, &r); errno = save_errno; } @@ -60,7 +61,7 @@ static void sig_handler_common(int sig, mcontext_t *mc) static int signals_enabled; static unsigned int signals_pending; -void sig_handler(int sig, mcontext_t *mc) +void sig_handler(int sig, siginfo_t *si, mcontext_t *mc) { int enabled; @@ -72,7 +73,7 @@ void sig_handler(int sig, mcontext_t *mc) block_signals(); - sig_handler_common(sig, mc); + sig_handler_common(sig, si, mc); set_signals(enabled); } @@ -85,10 +86,10 @@ static void real_alarm_handler(mcontext_t *mc) get_regs_from_mc(&regs, mc); regs.is_user = 0; unblock_signals(); - timer_handler(SIGVTALRM, &regs); + timer_handler(SIGVTALRM, NULL, &regs); } -void alarm_handler(int sig, mcontext_t *mc) +void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc) { int enabled; @@ -119,7 +120,7 @@ void set_sigstack(void *sig_stack, int size) panic("enabling signal stack failed, errno = %d\n", errno); } -static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { +static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = { [SIGSEGV] = sig_handler, [SIGBUS] = sig_handler, [SIGILL] = sig_handler, @@ -132,7 +133,7 @@ static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { }; -static void hard_handler(int sig, siginfo_t *info, void *p) +static void hard_handler(int sig, siginfo_t *si, void *p) { struct ucontext *uc = p; mcontext_t *mc = &uc->uc_mcontext; @@ -161,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *info, void *p) while ((sig = ffs(pending)) != 0){ sig--; pending &= ~(1 << sig); - (*handlers[sig])(sig, mc); + (*handlers[sig])(sig, si, mc); } /* @@ -273,9 +274,12 @@ void unblock_signals(void) * Deal with SIGIO first because the alarm handler might * schedule, leaving the pending SIGIO stranded until we come * back here. + * + * SIGIO's handler doesn't use siginfo or mcontext, + * so they can be NULL. 
*/ if (save_pending & SIGIO_MASK) - sig_handler_common(SIGIO, NULL); + sig_handler_common(SIGIO, NULL, NULL); if (save_pending & SIGVTALRM_MASK) real_alarm_handler(NULL); diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index cd65727854eb..d93bb40499f7 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -346,6 +346,10 @@ void userspace(struct uml_pt_regs *regs) int err, status, op, pid = userspace_pid[0]; /* To prevent races if using_sysemu changes under us.*/ int local_using_sysemu; + siginfo_t si; + + /* Handle any immediate reschedules or signals */ + interrupt_end(); if (getitimer(ITIMER_VIRTUAL, &timer)) printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno); @@ -404,13 +408,17 @@ void userspace(struct uml_pt_regs *regs) if (WIFSTOPPED(status)) { int sig = WSTOPSIG(status); + + ptrace(PTRACE_GETSIGINFO, pid, 0, &si); + switch (sig) { case SIGSEGV: if (PTRACE_FULL_FAULTINFO || !ptrace_faultinfo) { get_skas_faultinfo(pid, &regs->faultinfo); - (*sig_info[SIGSEGV])(SIGSEGV, regs); + (*sig_info[SIGSEGV])(SIGSEGV, &si, + regs); } else handle_segv(pid, regs); break; @@ -418,14 +426,14 @@ void userspace(struct uml_pt_regs *regs) handle_trap(pid, regs, local_using_sysemu); break; case SIGTRAP: - relay_signal(SIGTRAP, regs); + relay_signal(SIGTRAP, &si, regs); break; case SIGVTALRM: now = os_nsecs(); if (now < nsecs) break; block_signals(); - (*sig_info[sig])(sig, regs); + (*sig_info[sig])(sig, &si, regs); unblock_signals(); nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC + @@ -439,7 +447,7 @@ void userspace(struct uml_pt_regs *regs) case SIGFPE: case SIGWINCH: block_signals(); - (*sig_info[sig])(sig, regs); + (*sig_info[sig])(sig, &si, regs); unblock_signals(); break; default: diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c index 910499d76a67..f60238559af3 100644 --- a/arch/um/os-Linux/time.c +++ b/arch/um/os-Linux/time.c @@ -87,7 +87,7 @@ static int after_sleep_interval(struct timespec *ts) static void deliver_alarm(void) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); } static unsigned long long sleep_time(unsigned long long nsecs) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ba2657c49217..8ec3a1aa4abd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1527,7 +1527,7 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" + bool "Enable -fstack-protector buffer overflow detection" ---help--- This option turns on the -fstack-protector GCC feature. 
This feature puts, at the beginning of functions, a canary value on diff --git a/arch/x86/Makefile b/arch/x86/Makefile index b0c5276861ec..682e9c210baa 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y) KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return + # Never want PIC in a 32-bit kernel, prevent breakage with GCC built + # with nonstandard options + KBUILD_CFLAGS += -fno-pic + # prevent gcc from keeping the stack 16 byte aligned KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 5a747dd884db..f7535bedc33f 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/code16gcc.h \ - -fno-strict-aliasing -fomit-frame-pointer \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ $(call cc-option, -fno-unit-at-a-time)) \ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 441520e4174f..a3ac52b29cbf 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -33,6 +33,14 @@ #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ #define MCI_STATUS_AR (1ULL<<55) /* Action required */ +#define MCACOD 0xffff /* MCA Error Code */ + +/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ +#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ +#define MCACOD_SCRUBMSK 0xfff0 +#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ +#define MCACOD_DATA 0x0134 /* Data Load */ +#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ /* MCi_MISC register defines */ #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h index 87bdbca72f94..72f9adf6eca4 100644 --- a/arch/x86/include/asm/olpc.h +++ b/arch/x86/include/asm/olpc.h @@ -100,25 +100,6 @@ extern void olpc_xo1_pm_wakeup_clear(u16 value); extern int pci_olpc_init(void); -/* EC related functions */ - -extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, - unsigned char *outbuf, size_t outlen); - -/* EC commands */ - -#define EC_FIRMWARE_REV 0x08 -#define EC_WRITE_SCI_MASK 0x1b -#define EC_WAKE_UP_WLAN 0x24 -#define EC_WLAN_LEAVE_RESET 0x25 -#define EC_READ_EB_MODE 0x2a -#define EC_SET_SCI_INHIBIT 0x32 -#define EC_SET_SCI_INHIBIT_RELEASE 0x34 -#define EC_WLAN_ENTER_RESET 0x35 -#define EC_WRITE_EXT_SCI_MASK 0x38 -#define EC_SCI_QUERY 0x84 -#define EC_EXT_SCI_QUERY 0x85 - /* SCI source values */ #define EC_SCI_SRC_EMPTY 0x00 diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index dab39350e51e..cb4e43bce98a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; } extern void perf_events_lapic_init(void); /* - * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. - * This flag is otherwise unused and ABI specified to be 0, so nobody should - * care what we do with it. + * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise + * unused and ABI specified to be 0, so nobody should care what we do with + * them. 
+ * + * EXACT - the IP points to the exact instruction that triggered the + * event (HW bugs exempt). + * VM - original X86_VM_MASK; see set_linear_ip(). */ #define PERF_EFLAGS_EXACT (1UL << 3) +#define PERF_EFLAGS_VM (1UL << 5) struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 95bf99de9058..1b8e5a03d942 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags; static char temp_stack[4096]; #endif -asmlinkage void acpi_enter_s3(void) -{ - acpi_enter_sleep_state(3, wake_sleep_flags); -} /** * acpi_suspend_lowlevel - save kernel state * diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h index 5653a5791ec9..67f59f8c6956 100644 --- a/arch/x86/kernel/acpi/sleep.h +++ b/arch/x86/kernel/acpi/sleep.h @@ -2,7 +2,6 @@ * Variables and functions used by the code in sleep.c */ -#include <linux/linkage.h> #include <asm/realmode.h> extern unsigned long saved_video_mode; @@ -11,7 +10,6 @@ extern long saved_magic; extern int wakeup_pmode_return; extern u8 wake_sleep_flags; -extern asmlinkage void acpi_enter_s3(void); extern unsigned long acpi_copy_wakeup_routine(unsigned long); extern void wakeup_long64(void); diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 72610839f03b..13ab720573e3 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -74,7 +74,9 @@ restore_registers: ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers - call acpi_enter_s3 + pushl $3 + call acpi_enter_sleep_state + addl $4, %esp # In case of S3 failure, we'll emerge here. Jump # to ret_point to recover diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 014d1d28c397..8ea5164cbd04 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel) movq %rsi, saved_rsi addq $8, %rsp - call acpi_enter_s3 + movl $3, %edi + xorl %eax, %eax + call acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp resume_point diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 931280ff8299..afb7ff79a29f 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void) ideal_nops = intel_nops; #endif } - + break; default: #ifdef CONFIG_X86_64 ideal_nops = k8_nops; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 406eee784684..c265593ec2cd 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) BUG_ON(!cfg->vector); vector = cfg->vector; - for_each_cpu(cpu, cfg->domain) + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; @@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; - for_each_cpu(cpu, cfg->old_domain) { + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) @@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, if (!IO_APIC_IRQ(irq)) return; + /* + * For legacy irqs, cfg->domain starts with 
cpu 0. Now that IO-APIC + * can handle this irq and the apic driver is finialized at this point, + * update the cfg->domain. + */ + if (irq < legacy_pic->nr_legacy_irqs && + cpumask_equal(cfg->domain, cpumask_of(0))) + apic->vector_allocation_domain(0, cfg->domain, + apic->target_cpus()); + if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 46d8786d655e..a5fbc3c5fccc 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + setup_clear_cpu_cap(X86_FEATURE_AVX); + setup_clear_cpu_cap(X86_FEATURE_AVX2); return 1; } __setup("noxsave", x86_xsave_setup); diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 413c2ced887c..13017626f9a8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -55,13 +55,6 @@ static struct severity { #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) #define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV) -#define MCACOD 0xffff -/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ -#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ -#define MCACOD_SCRUBMSK 0xfff0 -#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ -#define MCACOD_DATA 0x0134 /* Data Load */ -#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ MCESEV( NO, "Invalid", diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5e095f873e3e..292d0258311c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -103,6 +103,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { static DEFINE_PER_CPU(struct work_struct, mce_work); +static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. @@ -650,14 +652,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll); * Do a quick check if any of the events requires a panic. * This decides if we keep the events around or clear them. */ -static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp) +static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, + struct pt_regs *regs) { int i, ret = 0; for (i = 0; i < banks; i++) { m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); - if (m->status & MCI_STATUS_VAL) + if (m->status & MCI_STATUS_VAL) { __set_bit(i, validp); + if (quirk_no_way_out) + quirk_no_way_out(i, m, regs); + } if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) ret = 1; } @@ -1040,7 +1046,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) *final = m; memset(valid_banks, 0, sizeof(valid_banks)); - no_way_out = mce_no_way_out(&m, &msg, valid_banks); + no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); barrier(); @@ -1418,6 +1424,34 @@ static void __mcheck_cpu_init_generic(void) } } +/* + * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and + * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM + * Vol 3B Table 15-20). But this confuses both the code that determines + * whether the machine check occurred in kernel or user mode, and also + * the severity assessment code. 
Pretend that EIPV was set, and take the + * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. + */ +static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) +{ + if (bank != 0) + return; + if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) + return; + if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| + MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| + MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| + MCACOD)) != + (MCI_STATUS_UC|MCI_STATUS_EN| + MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| + MCI_STATUS_AR|MCACOD_INSTR)) + return; + + m->mcgstatus |= MCG_STATUS_EIPV; + m->ip = regs->ip; + m->cs = regs->cs; +} + /* Add per CPU specific workarounds here */ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { @@ -1515,6 +1549,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) */ if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0) mce_bootlog = 0; + + if (c->x86 == 6 && c->x86_model == 45) + quirk_no_way_out = quirk_sandybridge_ifu; } if (monarch_timeout < 0) monarch_timeout = 0; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 29557aa06dda..915b876edd1e 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -32,6 +32,8 @@ #include <asm/smp.h> #include <asm/alternative.h> #include <asm/timer.h> +#include <asm/desc.h> +#include <asm/ldt.h> #include "perf_event.h" @@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size) return (__range_not_ok(fp, size, TASK_SIZE) == 0); } +static unsigned long get_segment_base(unsigned int segment) +{ + struct desc_struct *desc; + int idx = segment >> 3; + + if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + if (idx > LDT_ENTRIES) + return 0; + + if (idx > current->active_mm->context.size) + return 0; + + desc = current->active_mm->context.ldt; + } else { + if (idx > GDT_ENTRIES) + return 0; + + desc = __this_cpu_ptr(&gdt_page.gdt[0]); + } + + return get_desc_base(desc + idx); +} + #ifdef CONFIG_COMPAT #include <asm/compat.h> @@ -1746,13 +1771,17 @@ static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) { /* 32-bit process in 64-bit kernel. */ + unsigned long ss_base, cs_base; struct stack_frame_ia32 frame; const void __user *fp; if (!test_thread_flag(TIF_IA32)) return 0; - fp = compat_ptr(regs->bp); + cs_base = get_segment_base(regs->cs); + ss_base = get_segment_base(regs->ss); + + fp = compat_ptr(ss_base + regs->bp); while (entry->nr < PERF_MAX_STACK_DEPTH) { unsigned long bytes; frame.next_frame = 0; @@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (!valid_user_frame(fp, sizeof(frame))) break; - perf_callchain_store(entry, frame.return_address); - fp = compat_ptr(frame.next_frame); + perf_callchain_store(entry, cs_base + frame.return_address); + fp = compat_ptr(ss_base + frame.next_frame); } return 1; } @@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) return; } + /* + * We don't know what to do with VM86 stacks.. ignore them for now. 
+ */ + if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) + return; + fp = (void __user *)regs->bp; perf_callchain_store(entry, regs->ip); @@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) } } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +/* + * Deal with code segment offsets for the various execution modes: + * + * VM86 - the good olde 16 bit days, where the linear address is + * 20 bits and we use regs->ip + 0x10 * regs->cs. + * + * IA32 - Where we need to look at GDT/LDT segment descriptor tables + * to figure out what the 32bit base address is. + * + * X32 - has TIF_X32 set, but is running in x86_64 + * + * X86_64 - CS,DS,SS,ES are all zero based. + */ +static unsigned long code_segment_base(struct pt_regs *regs) { - unsigned long ip; + /* + * If we are in VM86 mode, add the segment offset to convert to a + * linear address. + */ + if (regs->flags & X86_VM_MASK) + return 0x10 * regs->cs; + + /* + * For IA32 we look at the GDT/LDT segment base to convert the + * effective IP to a linear address. + */ +#ifdef CONFIG_X86_32 + if (user_mode(regs) && regs->cs != __USER_CS) + return get_segment_base(regs->cs); +#else + if (test_thread_flag(TIF_IA32)) { + if (user_mode(regs) && regs->cs != __USER32_CS) + return get_segment_base(regs->cs); + } +#endif + return 0; +} +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - ip = perf_guest_cbs->get_guest_ip(); - else - ip = instruction_pointer(regs); + return perf_guest_cbs->get_guest_ip(); - return ip; + return regs->ip + code_segment_base(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) @@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) else misc |= PERF_RECORD_MISC_GUEST_KERNEL; } else { - if (!kernel_ip(regs->ip)) + if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 821d53b696d1..6605a81ba339 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip) #endif } +/* + * Not all PMUs provide the right context information to place the reported IP + * into full context. Specifically segment registers are typically not + * supplied. + * + * Assuming the address is a linear address (it is for IBS), we fake the CS and + * vm86 mode using the known zero-based code segment and 'fix up' the registers + * to reflect this. + * + * Intel PEBS/LBR appear to typically provide the effective address, nothing + * much we can do about that but pray and treat it like a linear address. + */ +static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) +{ + regs->cs = kernel_ip(ip) ? 
__KERNEL_CS : __USER_CS; + if (regs->flags & X86_VM_MASK) + regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); + regs->ip = ip; +} + #ifdef CONFIG_CPU_SUP_AMD int amd_pmu_init(void); diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index da9bcdcd9856..7bfb5bec8630 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -13,6 +13,8 @@ #include <asm/apic.h> +#include "perf_event.h" + static u32 ibs_caps; #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) @@ -536,7 +538,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) { regs.flags &= ~PERF_EFLAGS_EXACT; } else { - instruction_pointer_set(®s, ibs_data.regs[1]); + set_linear_ip(®s, ibs_data.regs[1]); regs.flags |= PERF_EFLAGS_EXACT; } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 382366977d4c..7f2739e03e79 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + /* + * If PMU counter has PEBS enabled it is not enough to disable counter + * on a guest entry since PEBS memory write can overshoot guest entry + * and corrupt guest memory. Disabling PEBS solves the problem. + */ + arr[1].msr = MSR_IA32_PEBS_ENABLE; + arr[1].host = cpuc->pebs_enabled; + arr[1].guest = 0; - *nr = 1; + *nr = 2; return arr; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 629ae0b7ad90..e38d97bf4259 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) * We sampled a branch insn, rewind using the LBR stack */ if (ip == to) { - regs->ip = from; + set_linear_ip(regs, from); return 1; } @@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) } while (to < ip); if (to == ip) { - regs->ip = old_to; + set_linear_ip(regs, old_to); return 1; } @@ -569,7 +569,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event, * A possible PERF_SAMPLE_REGS will have to transfer all regs. 
*/ regs = *iregs; - regs.ip = pebs->ip; + regs.flags = pebs->flags; + set_linear_ip(®s, pebs->ip); regs.bp = pebs->bp; regs.sp = pebs->sp; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 7563fda9f033..0a5571080e74 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -796,7 +796,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = { DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); -DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63"); DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); @@ -902,16 +901,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = { .attrs = nhmex_uncore_cbox_formats_attr, }; +/* msr offset for each instance of cbox */ +static unsigned nhmex_cbox_msr_offsets[] = { + 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, +}; + static struct intel_uncore_type nhmex_uncore_cbox = { .name = "cbox", .num_counters = 6, - .num_boxes = 8, + .num_boxes = 10, .perf_ctr_bits = 48, .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, .event_mask = NHMEX_PMON_RAW_EVENT_MASK, .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, - .msr_offset = NHMEX_C_MSR_OFFSET, + .msr_offsets = nhmex_cbox_msr_offsets, .pair_ctr_ctl = 1, .ops = &nhmex_uncore_ops, .format_group = &nhmex_uncore_cbox_format_group @@ -1032,24 +1036,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = { static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) { - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { - reg1->config = event->attr.config1; - reg2->config = event->attr.config2; - } else { - reg1->config = ~0ULL; - reg2->config = ~0ULL; - } + /* only TO_R_PROG_EV event uses the match/mask register */ + if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != + NHMEX_S_EVENT_TO_R_PROG_EV) + return 0; if (box->pmu->pmu_idx == 0) reg1->reg = NHMEX_S0_MSR_MM_CFG; else reg1->reg = NHMEX_S1_MSR_MM_CFG; - reg1->idx = 0; - + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; return 0; } @@ -1059,8 +1061,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event_extra *reg1 = &hwc->extra_reg; struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - wrmsrl(reg1->reg, 0); - if (reg1->config != ~0ULL || reg2->config != ~0ULL) { + if (reg1->idx != EXTRA_REG_NONE) { + wrmsrl(reg1->reg, 0); wrmsrl(reg1->reg + 1, reg1->config); wrmsrl(reg1->reg + 2, reg2->config); wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); @@ -1074,7 +1076,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = { &format_attr_edge.attr, &format_attr_inv.attr, &format_attr_thresh8.attr, - &format_attr_mm_cfg.attr, &format_attr_match.attr, &format_attr_mask.attr, NULL, @@ -1142,6 +1143,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { EVENT_EXTRA_END }; +/* Nehalem-EX or Westmere-EX ? 
*/ +bool uncore_nhmex; + static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) { struct intel_uncore_extra_reg *er; @@ -1171,18 +1175,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 return false; /* mask of the shared fields */ - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; raw_spin_lock_irqsave(&er->lock, flags); /* add mask of the non-shared field if it's in use */ - if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) - mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { + if (uncore_nhmex) + mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + } if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { atomic_add(1 << (idx * 8), &er->ref); - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | - NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | + NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | + WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); er->config &= ~mask; er->config |= (config & mask); ret = true; @@ -1216,7 +1231,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) /* get the non-shared control bits and shift them */ idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (uncore_nhmex) + config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); if (new_idx > orig_idx) { idx = new_idx - orig_idx; config <<= 3 * idx; @@ -1226,6 +1244,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) } /* add the shared control bits back */ + if (uncore_nhmex) + config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + else + config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; if (modify) { /* adjust the main event selector */ @@ -1264,7 +1286,8 @@ again: } /* for the match/mask registers */ - if ((uncore_box_is_fake(box) || !reg2->alloc) && + if (reg2->idx != EXTRA_REG_NONE && + (uncore_box_is_fake(box) || !reg2->alloc) && !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) goto fail; @@ -1278,7 +1301,8 @@ again: if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) nhmex_mbox_alter_er(event, idx[0], true); reg1->alloc |= alloc; - reg2->alloc = 1; + if (reg2->idx != EXTRA_REG_NONE) + reg2->alloc = 1; } return NULL; fail: @@ -1342,9 +1366,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event struct extra_reg *er; unsigned msr; int reg_idx = 0; - - if (WARN_ON_ONCE(reg1->idx != -1)) - return -EINVAL; /* * The mbox events may require 2 extra MSRs at the most. 
But only * the lower 32 bits in these MSRs are significant, so we can use @@ -1355,11 +1376,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; - if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || - er->idx == __BITS_VALUE(reg1->idx, 1, 8)) - continue; - if (WARN_ON_ONCE(reg_idx >= 2)) - return -EINVAL; msr = er->msr + type->msr_offset * box->pmu->pmu_idx; if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) @@ -1368,6 +1384,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event /* always use the 32~63 bits to pass the PLD config */ if (er->idx == EXTRA_REG_NHMEX_M_PLD) reg_idx = 1; + else if (WARN_ON_ONCE(reg_idx > 0)) + return -EINVAL; reg1->idx &= ~(0xff << (reg_idx * 8)); reg1->reg &= ~(0xffff << (reg_idx * 16)); @@ -1376,17 +1394,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event reg1->config = event->attr.config1; reg_idx++; } - /* use config2 to pass the filter config */ - reg2->idx = EXTRA_REG_NHMEX_M_FILTER; - if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) - reg2->config = event->attr.config2; - else - reg2->config = ~0ULL; - if (box->pmu->pmu_idx == 0) - reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; - else - reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; - + /* + * The mbox only provides ability to perform address matching + * for the PLD events. + */ + if (reg_idx == 2) { + reg2->idx = EXTRA_REG_NHMEX_M_FILTER; + if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) + reg2->config = event->attr.config2; + else + reg2->config = ~0ULL; + if (box->pmu->pmu_idx == 0) + reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; + else + reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; + } return 0; } @@ -1422,34 +1444,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), nhmex_mbox_shared_reg_config(box, idx)); - wrmsrl(reg2->reg, 0); - if (reg2->config != ~0ULL) { - wrmsrl(reg2->reg + 1, - reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); - wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & - (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); - wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + if (reg2->idx != EXTRA_REG_NONE) { + wrmsrl(reg2->reg, 0); + if (reg2->config != ~0ULL) { + wrmsrl(reg2->reg + 1, + reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); + wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & + (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); + wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + } } wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); } -DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); -DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); -DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); -DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); -DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); -DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); -DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); -DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); -DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); -DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pld, pld, 
"config1:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); +DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); +DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); +DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); +DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); +DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); +DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); +DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); +DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); +DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); static struct attribute *nhmex_uncore_mbox_formats_attr[] = { &format_attr_count_mode.attr, @@ -1458,7 +1482,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = { &format_attr_flag_mode.attr, &format_attr_inc_sel.attr, &format_attr_set_flag_sel.attr, - &format_attr_filter_cfg.attr, + &format_attr_filter_cfg_en.attr, &format_attr_filter_match.attr, &format_attr_filter_mask.attr, &format_attr_dsp.attr, @@ -1482,6 +1506,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = { { /* end: all zeroes */ }, }; +static struct uncore_event_desc wsmex_uncore_mbox_events[] = { + INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), + INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), + { /* end: all zeroes */ }, +}; + static struct intel_uncore_ops nhmex_uncore_mbox_ops = { NHMEX_UNCORE_OPS_COMMON_INIT(), .enable_event = nhmex_mbox_msr_enable_event, @@ -1513,7 +1543,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) struct hw_perf_event_extra *reg1 = &hwc->extra_reg; int port; - /* adjust the main event selector */ + /* adjust the main event selector and extra register index */ if (reg1->idx % 2) { reg1->idx--; hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1522,29 +1552,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; } - /* adjust address or config of extra register */ + /* adjust extra register config */ port = reg1->idx / 6 + box->pmu->pmu_idx * 4; switch (reg1->idx % 6) { - case 0: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); - break; - case 1: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); - break; case 2: - /* the 8~15 bits to the 0~7 bits */ + /* shift the 8~15 bits to the 0~7 bits */ reg1->config >>= 8; break; case 3: - /* the 0~7 bits to the 8~15 bits */ + /* shift the 0~7 bits to the 8~15 bits */ reg1->config <<= 8; break; - case 4: - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); - break; - case 5: - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); - break; }; } @@ -1671,7 +1689,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - int port, idx; + int idx; idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1681,27 +1699,11 @@ static int nhmex_rbox_hw_config(struct 
intel_uncore_box *box, struct perf_event reg1->idx = idx; reg1->config = event->attr.config1; - port = idx / 6 + box->pmu->pmu_idx * 4; - idx %= 6; - switch (idx) { - case 0: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); - break; - case 1: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); - break; - case 2: - case 3: - reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); - break; + switch (idx % 6) { case 4: case 5: - if (idx == 4) - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); - else - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); - reg2->config = event->attr.config2; hwc->config |= event->attr.config & (~0ULL << 32); + reg2->config = event->attr.config2; break; }; return 0; @@ -1727,28 +1729,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - int idx, er_idx; + int idx, port; - idx = reg1->idx % 6; - er_idx = idx; - if (er_idx > 2) - er_idx--; - er_idx += (reg1->idx / 6) * 5; + idx = reg1->idx; + port = idx / 6 + box->pmu->pmu_idx * 4; - switch (idx) { + switch (idx % 6) { case 0: + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); + break; case 1: - wrmsrl(reg1->reg, reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); break; case 2: case 3: - wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); + wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), + nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5)); break; case 4: + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); + break; case 5: - wrmsrl(reg1->reg, reg1->config); - wrmsrl(reg1->reg + 1, hwc->config >> 32); - wrmsrl(reg1->reg + 2, reg2->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); break; }; @@ -1756,8 +1764,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); } -DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); -DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); @@ -2303,6 +2311,7 @@ int uncore_pmu_event_init(struct perf_event *event) event->hw.idx = -1; event->hw.last_tag = ~0ULL; event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; if (event->attr.config == UNCORE_FIXED_EVENT) { /* no fixed counter */ @@ -2373,7 +2382,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type) type->attr_groups[1] = NULL; } -static void uncore_types_exit(struct intel_uncore_type **types) +static void __init uncore_types_exit(struct intel_uncore_type **types) { int i; for (i = 0; types[i]; i++) @@ -2814,7 +2823,13 @@ static int __init uncore_cpu_init(void) snbep_uncore_cbox.num_boxes = max_cores; msr_uncores = snbep_msr_uncores; break; - case 46: + case 46: /* Nehalem-EX */ + uncore_nhmex = true; + case 47: /* Westmere-EX aka. 
Xeon E7 */ + if (!uncore_nhmex) + nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; + if (nhmex_uncore_cbox.num_boxes > max_cores) + nhmex_uncore_cbox.num_boxes = max_cores; msr_uncores = nhmex_msr_uncores; break; default: diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index f3851892e077..5b81c1856aac 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -5,7 +5,7 @@ #include "perf_event.h" #define UNCORE_PMU_NAME_LEN 32 -#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) #define UNCORE_FIXED_EVENT 0xff #define UNCORE_PMC_IDX_MAX_GENERIC 8 @@ -230,6 +230,7 @@ #define NHMEX_S1_MSR_MASK 0xe5a #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) +#define NHMEX_S_EVENT_TO_R_PROG_EV 0 /* NHM-EX Mbox */ #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 @@ -275,18 +276,12 @@ NHMEX_M_PMON_CTL_INC_SEL_MASK | \ NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) - -#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f -#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5) -#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8) -#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23) -#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \ - (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) +#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) +#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) +#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n))) + /* * use the 9~13 bits to select event If the 7th bit is not set, * otherwise use the 19~21 bits to select event. @@ -368,6 +363,7 @@ struct intel_uncore_type { unsigned num_shared_regs:8; unsigned single_fixed:1; unsigned pair_ctr_ctl:1; + unsigned *msr_offsets; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; @@ -485,29 +481,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) return idx * 8 + box->pmu->type->perf_ctr; } -static inline -unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) +{ + struct intel_uncore_pmu *pmu = box->pmu; + return pmu->type->msr_offsets ? 
+ pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) { if (!box->pmu->type->box_ctl) return 0; - return box->pmu->type->box_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); } -static inline -unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) { if (!box->pmu->type->fixed_ctl) return 0; - return box->pmu->type->fixed_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); } -static inline -unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) { - return box->pmu->type->fixed_ctr + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); } static inline @@ -515,7 +513,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) { return box->pmu->type->event_ctl + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + uncore_msr_box_offset(box); } static inline @@ -523,7 +521,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) { return box->pmu->type->perf_ctr + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + uncore_msr_box_offset(box); } static inline diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 1f5f1d5d2a02..7ad683d78645 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -328,6 +328,7 @@ void fixup_irqs(void) chip->irq_retrigger(data); raw_spin_unlock(&desc->lock); } + __this_cpu_write(vector_irq[vector], -1); } } #endif diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index 1d5d31ea686b..dc1404bf8e4b 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c @@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) { struct setup_data_node *node; struct setup_data *data; - int error = -ENOMEM; + int error; struct dentry *d; struct page *pg; u64 pa_data; @@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent) while (pa_data) { node = kmalloc(sizeof(*node), GFP_KERNEL); - if (!node) + if (!node) { + error = -ENOMEM; goto err_dir; + } pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT); if (PageHighMem(pg)) { diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 1df8fb9e1d5d..e498b18f010c 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) addr &= 1; if (addr == 0) { if (val & 0x10) { + u8 edge_irr = s->irr & ~s->elcr; + int i; + bool found; + struct kvm_vcpu *vcpu; + s->init4 = val & 1; s->last_irr = 0; s->irr &= s->elcr; @@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) if (val & 0x08) pr_pic_unimpl( "level sensitive irq not supported"); + + kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) + if (kvm_apic_accept_pic_intr(vcpu)) { + found = true; + break; + } + + + if (found) + for (irq = 0; irq < PIC_NUM_PINS/2; irq++) + if (edge_irr & (1 << irq)) + pic_clear_isr(s, irq); } else if (val & 0x08) { if (val & 0x04) s->poll = 1; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c39b60707e02..c00f03de1b79 100644 --- a/arch/x86/kvm/vmx.c +++ 
b/arch/x86/kvm/vmx.c @@ -1488,13 +1488,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } -#else - /* - * The sysexit path does not restore ds/es, so we must set them to - * a reasonable value ourselves. - */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); #endif reload_tss(); #ifdef CONFIG_X86_64 @@ -6370,6 +6363,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); +#ifndef CONFIG_X86_64 + /* + * The sysexit path does not restore ds/es, so we must set them to + * a reasonable value ourselves. + * + * We can't defer this to vmx_load_host_state() since that function + * may be executed in interrupt context, which saves and restore segments + * around it, nullifying its effect. + */ + loadsegment(ds, __USER_DS); + loadsegment(es, __USER_DS); +#endif + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | (1 << VCPU_EXREG_RFLAGS) | (1 << VCPU_EXREG_CPL) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 59b59508ff07..42bce48f6928 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) */ getboottime(&boot); + if (kvm->arch.kvmclock_offset) { + struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); + boot = timespec_sub(boot, ts); + } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index f6679a7fb8ca..b91e48512425 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) } /* - * search for a shareable pmd page for hugetlb. + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. While this is not necessary for the + * !shared pmd case because we can allocate the pmd later as well, it makes the + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_mutex section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing. 
*/ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +static pte_t * +huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; @@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; + pte_t *pte; if (!vma_shareable(vma, addr)) - return; + return (pte_t *)pmd_alloc(mm, pud, addr); mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { @@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) put_page(virt_to_page(spte)); spin_unlock(&mm->page_table_lock); out: + pte = (pte_t *)pmd_alloc(mm, pud, addr); mutex_unlock(&mapping->i_mmap_mutex); + return pte; } /* @@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else { BUG_ON(sz != PMD_SIZE); if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); - pte = (pte_t *) pmd_alloc(mm, pud, addr); + pte = huge_pmd_share(mm, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); } } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 931930a96160..a718e0d23503 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, /* * On success we use clflush, when the CPU supports it to - * avoid the wbindv. If the CPU does not support it, in the - * error case, and during early boot (for EFI) we fall back - * to cpa_flush_all (which uses wbinvd): + * avoid the wbindv. If the CPU does not support it and in the + * error case we fall back to cpa_flush_all (which uses + * wbindv): */ - if (early_boot_irqs_disabled) - __cpa_flush_all((void *)(long)cache); - else if (!ret && cpu_has_clflush) { + if (!ret && cpu_has_clflush) { if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { cpa_flush_array(addr, numpages, cache, cpa.flags, pages); diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 4599c3e8bcb6..4ddf497ca65b 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;} #endif /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ -void __init +int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { u64 start, end; int node, pxm; if (srat_disabled()) - return; + return -1; if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { bad_srat(); - return; + return -1; } if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) - return; + return -1; if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) - return; + return -1; start = ma->base_address; end = start + ma->length; pxm = ma->proximity_domain; @@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains.\n"); bad_srat(); - return; + return -1; } if (numa_add_memblk(node, start, end) < 0) { bad_srat(); - return; + return -1; } node_set(node, numa_nodes_parsed); @@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", node, pxm, (unsigned long long) start, (unsigned long long) end - 1); + return 0; } void __init acpi_numa_arch_fixup(void) {} diff --git 
a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 2dc29f51e75a..92660edaa1e7 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map( return status; } -static int efi_set_rtc_mmss(unsigned long nowtime) +static efi_status_t __init phys_efi_get_time(efi_time_t *tm, + efi_time_cap_t *tc) +{ + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + efi_call_phys_prelog(); + status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm), + virt_to_phys(tc)); + efi_call_phys_epilog(); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; +} + +int efi_set_rtc_mmss(unsigned long nowtime) { int real_seconds, real_minutes; efi_status_t status; @@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime) return 0; } -static unsigned long efi_get_time(void) +unsigned long efi_get_time(void) { efi_status_t status; efi_time_t eft; @@ -606,13 +621,18 @@ static int __init efi_runtime_init(void) } /* * We will only need *early* access to the following - * EFI runtime service before set_virtual_address_map + * two EFI runtime services before set_virtual_address_map * is invoked. */ + efi_phys.get_time = (efi_get_time_t *)runtime->get_time; efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) runtime->set_virtual_address_map; - + /* + * Make efi_get_time can be called before entering + * virtual mode. + */ + efi.get_time = phys_efi_get_time; early_iounmap(runtime, sizeof(efi_runtime_services_t)); return 0; @@ -700,10 +720,12 @@ void __init efi_init(void) efi_enabled = 0; return; } +#ifdef CONFIG_X86_32 if (efi_native) { x86_platform.get_wallclock = efi_get_time; x86_platform.set_wallclock = efi_set_rtc_mmss; } +#endif #if EFI_DEBUG print_efi_memmap(); diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c index 0ce8616c88ae..d75582d1aa55 100644 --- a/arch/x86/platform/olpc/olpc-xo1-pm.c +++ b/arch/x86/platform/olpc/olpc-xo1-pm.c @@ -18,6 +18,7 @@ #include <linux/pm.h> #include <linux/mfd/core.h> #include <linux/suspend.h> +#include <linux/olpc-ec.h> #include <asm/io.h> #include <asm/olpc.h> @@ -51,16 +52,11 @@ EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_clear); static int xo1_power_state_enter(suspend_state_t pm_state) { unsigned long saved_sci_mask; - int r; /* Only STR is supported */ if (pm_state != PM_SUSPEND_MEM) return -EINVAL; - r = olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0); - if (r) - return r; - /* * Save SCI mask (this gets lost since PM1_EN is used as a mask for * wakeup events, which is not necessarily the same event set) @@ -76,16 +72,6 @@ static int xo1_power_state_enter(suspend_state_t pm_state) /* Restore SCI mask (using dword access to CS5536_PM1_EN) */ outl(saved_sci_mask, acpi_base + CS5536_PM1_STS); - /* Tell the EC to stop inhibiting SCIs */ - olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0); - - /* - * Tell the wireless module to restart USB communication. - * Must be done twice. 
- */ - olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); - olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); - return 0; } diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index 04b8c73659c5..63d4aa40956e 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -23,6 +23,7 @@ #include <linux/power_supply.h> #include <linux/suspend.h> #include <linux/workqueue.h> +#include <linux/olpc-ec.h> #include <asm/io.h> #include <asm/msr.h> diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c index 599be499fdf7..2fdca25905ae 100644 --- a/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/arch/x86/platform/olpc/olpc-xo15-sci.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/power_supply.h> +#include <linux/olpc-ec.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c index a4bee53c2e54..27376081ddec 100644 --- a/arch/x86/platform/olpc/olpc.c +++ b/arch/x86/platform/olpc/olpc.c @@ -14,14 +14,13 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> -#include <linux/spinlock.h> #include <linux/io.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/syscore_ops.h> -#include <linux/debugfs.h> #include <linux/mutex.h> +#include <linux/olpc-ec.h> #include <asm/geode.h> #include <asm/setup.h> @@ -31,17 +30,6 @@ struct olpc_platform_t olpc_platform_info; EXPORT_SYMBOL_GPL(olpc_platform_info); -static DEFINE_SPINLOCK(ec_lock); - -/* debugfs interface to EC commands */ -#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */ -#define EC_MAX_CMD_REPLY (8) - -static struct dentry *ec_debugfs_dir; -static DEFINE_MUTEX(ec_debugfs_cmd_lock); -static unsigned char ec_debugfs_resp[EC_MAX_CMD_REPLY]; -static unsigned int ec_debugfs_resp_bytes; - /* EC event mask to be applied during suspend (defining wakeup sources). */ static u16 ec_wakeup_mask; @@ -125,16 +113,13 @@ static int __wait_on_obf(unsigned int line, unsigned int port, int desired) * <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while * OpenFirmware's source is available, the EC's is not. 
*/ -int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, - unsigned char *outbuf, size_t outlen) +static int olpc_xo1_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, + size_t outlen, void *arg) { - unsigned long flags; int ret = -EIO; int i; int restarts = 0; - spin_lock_irqsave(&ec_lock, flags); - /* Clear OBF */ for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++) inb(0x68); @@ -198,10 +183,8 @@ restart: ret = 0; err: - spin_unlock_irqrestore(&ec_lock, flags); return ret; } -EXPORT_SYMBOL_GPL(olpc_ec_cmd); void olpc_ec_wakeup_set(u16 value) { @@ -280,96 +263,6 @@ int olpc_ec_sci_query(u16 *sci_value) } EXPORT_SYMBOL_GPL(olpc_ec_sci_query); -static ssize_t ec_debugfs_cmd_write(struct file *file, const char __user *buf, - size_t size, loff_t *ppos) -{ - int i, m; - unsigned char ec_cmd[EC_MAX_CMD_ARGS]; - unsigned int ec_cmd_int[EC_MAX_CMD_ARGS]; - char cmdbuf[64]; - int ec_cmd_bytes; - - mutex_lock(&ec_debugfs_cmd_lock); - - size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size); - - m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0], - &ec_debugfs_resp_bytes, - &ec_cmd_int[1], &ec_cmd_int[2], &ec_cmd_int[3], - &ec_cmd_int[4], &ec_cmd_int[5]); - if (m < 2 || ec_debugfs_resp_bytes > EC_MAX_CMD_REPLY) { - /* reset to prevent overflow on read */ - ec_debugfs_resp_bytes = 0; - - printk(KERN_DEBUG "olpc-ec: bad ec cmd: " - "cmd:response-count [arg1 [arg2 ...]]\n"); - size = -EINVAL; - goto out; - } - - /* convert scanf'd ints to char */ - ec_cmd_bytes = m - 2; - for (i = 0; i <= ec_cmd_bytes; i++) - ec_cmd[i] = ec_cmd_int[i]; - - printk(KERN_DEBUG "olpc-ec: debugfs cmd 0x%02x with %d args " - "%02x %02x %02x %02x %02x, want %d returns\n", - ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], ec_cmd[3], - ec_cmd[4], ec_cmd[5], ec_debugfs_resp_bytes); - - olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? 
NULL : &ec_cmd[1], - ec_cmd_bytes, ec_debugfs_resp, ec_debugfs_resp_bytes); - - printk(KERN_DEBUG "olpc-ec: response " - "%02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n", - ec_debugfs_resp[0], ec_debugfs_resp[1], ec_debugfs_resp[2], - ec_debugfs_resp[3], ec_debugfs_resp[4], ec_debugfs_resp[5], - ec_debugfs_resp[6], ec_debugfs_resp[7], ec_debugfs_resp_bytes); - -out: - mutex_unlock(&ec_debugfs_cmd_lock); - return size; -} - -static ssize_t ec_debugfs_cmd_read(struct file *file, char __user *buf, - size_t size, loff_t *ppos) -{ - unsigned int i, r; - char *rp; - char respbuf[64]; - - mutex_lock(&ec_debugfs_cmd_lock); - rp = respbuf; - rp += sprintf(rp, "%02x", ec_debugfs_resp[0]); - for (i = 1; i < ec_debugfs_resp_bytes; i++) - rp += sprintf(rp, ", %02x", ec_debugfs_resp[i]); - mutex_unlock(&ec_debugfs_cmd_lock); - rp += sprintf(rp, "\n"); - - r = rp - respbuf; - return simple_read_from_buffer(buf, size, ppos, respbuf, r); -} - -static const struct file_operations ec_debugfs_genops = { - .write = ec_debugfs_cmd_write, - .read = ec_debugfs_cmd_read, -}; - -static void setup_debugfs(void) -{ - ec_debugfs_dir = debugfs_create_dir("olpc-ec", 0); - if (ec_debugfs_dir == ERR_PTR(-ENODEV)) - return; - - debugfs_create_file("cmd", 0600, ec_debugfs_dir, NULL, - &ec_debugfs_genops); -} - -static int olpc_ec_suspend(void) -{ - return olpc_ec_mask_write(ec_wakeup_mask); -} - static bool __init check_ofw_architecture(struct device_node *root) { const char *olpc_arch; @@ -424,8 +317,59 @@ static int __init add_xo1_platform_devices(void) return 0; } -static struct syscore_ops olpc_syscore_ops = { - .suspend = olpc_ec_suspend, +static int olpc_xo1_ec_probe(struct platform_device *pdev) +{ + /* get the EC revision */ + olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, + (unsigned char *) &olpc_platform_info.ecver, 1); + + /* EC version 0x5f adds support for wide SCI mask */ + if (olpc_platform_info.ecver >= 0x5f) + olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI; + + pr_info("OLPC board revision %s%X (EC=%x)\n", + ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", + olpc_platform_info.boardrev >> 4, + olpc_platform_info.ecver); + + return 0; +} +static int olpc_xo1_ec_suspend(struct platform_device *pdev) +{ + olpc_ec_mask_write(ec_wakeup_mask); + + /* + * Squelch SCIs while suspended. This is a fix for + * <http://dev.laptop.org/ticket/1835>. + */ + return olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0); +} + +static int olpc_xo1_ec_resume(struct platform_device *pdev) +{ + /* Tell the EC to stop inhibiting SCIs */ + olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0); + + /* + * Tell the wireless module to restart USB communication. + * Must be done twice. 
+ */ + olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); + olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); + + return 0; +} + +static struct olpc_ec_driver ec_xo1_driver = { + .probe = olpc_xo1_ec_probe, + .suspend = olpc_xo1_ec_suspend, + .resume = olpc_xo1_ec_resume, + .ec_cmd = olpc_xo1_ec_cmd, +}; + +static struct olpc_ec_driver ec_xo1_5_driver = { + .probe = olpc_xo1_ec_probe, + .ec_cmd = olpc_xo1_ec_cmd, }; static int __init olpc_init(void) @@ -435,16 +379,17 @@ static int __init olpc_init(void) if (!olpc_ofw_present() || !platform_detect()) return 0; - spin_lock_init(&ec_lock); + /* register the XO-1 and 1.5-specific EC handler */ + if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */ + olpc_ec_driver_register(&ec_xo1_driver, NULL); + else + olpc_ec_driver_register(&ec_xo1_5_driver, NULL); + platform_device_register_simple("olpc-ec", -1, NULL, 0); /* assume B1 and above models always have a DCON */ if (olpc_board_at_least(olpc_board(0xb1))) olpc_platform_info.flags |= OLPC_F_DCON; - /* get the EC revision */ - olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, - (unsigned char *) &olpc_platform_info.ecver, 1); - #ifdef CONFIG_PCI_OLPC /* If the VSA exists let it emulate PCI, if not emulate in kernel. * XO-1 only. */ @@ -452,14 +397,6 @@ static int __init olpc_init(void) !cs5535_has_vsa2()) x86_init.pci.arch_init = pci_olpc_init; #endif - /* EC version 0x5f adds support for wide SCI mask */ - if (olpc_platform_info.ecver >= 0x5f) - olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI; - - printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", - ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", - olpc_platform_info.boardrev >> 4, - olpc_platform_info.ecver); if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */ r = add_xo1_platform_devices(); @@ -467,9 +404,6 @@ static int __init olpc_init(void) return r; } - register_syscore_ops(&olpc_syscore_ops); - setup_debugfs(); - return 0; } diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index b2d534cab25f..88692871823f 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/../../boot/code16gcc.h \ - -fno-strict-aliasing -fomit-frame-pointer \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ $(call cc-option, -fno-unit-at-a-time)) \ diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 51171aeff0dc..a582bfed95bb 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -60,8 +60,8 @@ 51 common getsockname sys_getsockname 52 common getpeername sys_getpeername 53 common socketpair sys_socketpair -54 common setsockopt sys_setsockopt -55 common getsockopt sys_getsockopt +54 64 setsockopt sys_setsockopt +55 64 getsockopt sys_getsockopt 56 common clone stub_clone 57 common fork stub_fork 58 common vfork stub_vfork @@ -318,7 +318,7 @@ 309 common getcpu sys_getcpu 310 64 process_vm_readv sys_process_vm_readv 311 64 process_vm_writev sys_process_vm_writev -312 64 kcmp sys_kcmp +312 common kcmp sys_kcmp # # x32-specific system call numbers start at 512 to avoid cache impact @@ -353,3 +353,5 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev +541 x32 setsockopt 
compat_sys_setsockopt +542 x32 getsockopt compat_sys_getsockopt diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h index 950dfb7b8417..e72cd0df5ba3 100644 --- a/arch/x86/um/asm/ptrace.h +++ b/arch/x86/um/asm/ptrace.h @@ -30,10 +30,10 @@ #define profile_pc(regs) PT_REGS_IP(regs) #define UPT_RESTART_SYSCALL(r) (UPT_IP(r) -= 2) -#define UPT_SET_SYSCALL_RETURN(r, res) (UPT_AX(r) = (res)) +#define PT_REGS_SET_SYSCALL_RETURN(r, res) (PT_REGS_AX(r) = (res)) -static inline long regs_return_value(struct uml_pt_regs *regs) +static inline long regs_return_value(struct pt_regs *regs) { - return UPT_AX(regs); + return PT_REGS_AX(regs); } #endif /* __UM_X86_PTRACE_H */ diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 64effdc6da94..b2e91d40a4cb 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -194,6 +194,11 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID * boundary violation will require three middle nodes. */ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); +/* When we populate back during bootup, the amount of pages can vary. The + * max we have is seen is 395979, but that does not mean it can't be more. + * But some machines can have 3GB I/O holes even. So lets reserve enough + * for 4GB of I/O and E820 holes. */ +RESERVE_BRK(p2m_populated, PMD_SIZE * 4); static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index e7dee617358e..f3b44a65fc7a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root); static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) -{ - return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), - struct blkcg, css); -} -EXPORT_SYMBOL_GPL(cgroup_to_blkcg); - -static struct blkcg *task_blkcg(struct task_struct *tsk) -{ - return container_of(task_subsys_state(tsk, blkio_subsys_id), - struct blkcg, css); -} - -struct blkcg *bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_css) - return container_of(bio->bi_css, struct blkcg, css); - return task_blkcg(current); -} -EXPORT_SYMBOL_GPL(bio_blkcg); - static bool blkcg_policy_enabled(struct request_queue *q, const struct blkcg_policy *pol) { @@ -84,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(pd); } + blk_exit_rl(&blkg->rl); kfree(blkg); } @@ -91,16 +71,18 @@ static void blkg_free(struct blkcg_gq *blkg) * blkg_alloc - allocate a blkg * @blkcg: block cgroup the new blkg is associated with * @q: request_queue the new blkg is associated with + * @gfp_mask: allocation mask to use * * Allocate a new blkg assocating @blkcg and @q. 
*/ -static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) +static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, + gfp_t gfp_mask) { struct blkcg_gq *blkg; int i; /* alloc and init base part */ - blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node); + blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); if (!blkg) return NULL; @@ -109,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) blkg->blkcg = blkcg; blkg->refcnt = 1; + /* root blkg uses @q->root_rl, init rl only for !root blkgs */ + if (blkcg != &blkcg_root) { + if (blk_init_rl(&blkg->rl, q, gfp_mask)) + goto err_free; + blkg->rl.blkg = blkg; + } + for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkg_policy_data *pd; @@ -117,11 +106,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) continue; /* alloc per-policy data and attach it to blkg */ - pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node); - if (!pd) { - blkg_free(blkg); - return NULL; - } + pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); + if (!pd) + goto err_free; blkg->pd[i] = pd; pd->blkg = blkg; @@ -132,6 +119,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) } return blkg; + +err_free: + blkg_free(blkg); + return NULL; } static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, @@ -175,9 +166,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) } EXPORT_SYMBOL_GPL(blkg_lookup); +/* + * If @new_blkg is %NULL, this function tries to allocate a new one as + * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. + */ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) + struct request_queue *q, + struct blkcg_gq *new_blkg) { struct blkcg_gq *blkg; int ret; @@ -189,24 +184,26 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, blkg = __blkg_lookup(blkcg, q); if (blkg) { rcu_assign_pointer(blkcg->blkg_hint, blkg); - return blkg; + goto out_free; } /* blkg holds a reference to blkcg */ - if (!css_tryget(&blkcg->css)) - return ERR_PTR(-EINVAL); + if (!css_tryget(&blkcg->css)) { + blkg = ERR_PTR(-EINVAL); + goto out_free; + } /* allocate */ - ret = -ENOMEM; - blkg = blkg_alloc(blkcg, q); - if (unlikely(!blkg)) - goto err_put; + if (!new_blkg) { + new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); + if (unlikely(!new_blkg)) { + blkg = ERR_PTR(-ENOMEM); + goto out_put; + } + } + blkg = new_blkg; /* insert */ - ret = radix_tree_preload(GFP_ATOMIC); - if (ret) - goto err_free; - spin_lock(&blkcg->lock); ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); if (likely(!ret)) { @@ -215,15 +212,15 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, } spin_unlock(&blkcg->lock); - radix_tree_preload_end(); - if (!ret) return blkg; -err_free: - blkg_free(blkg); -err_put: + + blkg = ERR_PTR(ret); +out_put: css_put(&blkcg->css); - return ERR_PTR(ret); +out_free: + blkg_free(new_blkg); + return blkg; } struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, @@ -235,7 +232,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, */ if (unlikely(blk_queue_bypass(q))) return ERR_PTR(blk_queue_dead(q) ? 
-EINVAL : -EBUSY); - return __blkg_lookup_create(blkcg, q); + return __blkg_lookup_create(blkcg, q, NULL); } EXPORT_SYMBOL_GPL(blkg_lookup_create); @@ -313,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg) } EXPORT_SYMBOL_GPL(__blkg_release); +/* + * The next function used by blk_queue_for_each_rl(). It's a bit tricky + * because the root blkg uses @q->root_rl instead of its own rl. + */ +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q) +{ + struct list_head *ent; + struct blkcg_gq *blkg; + + /* + * Determine the current blkg list_head. The first entry is + * root_rl which is off @q->blkg_list and mapped to the head. + */ + if (rl == &q->root_rl) { + ent = &q->blkg_list; + } else { + blkg = container_of(rl, struct blkcg_gq, rl); + ent = &blkg->q_node; + } + + /* walk to the next list_head, skip root blkcg */ + ent = ent->next; + if (ent == &q->root_blkg->q_node) + ent = ent->next; + if (ent == &q->blkg_list) + return NULL; + + blkg = container_of(ent, struct blkcg_gq, q_node); + return &blkg->rl; +} + static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) { @@ -734,24 +763,36 @@ int blkcg_activate_policy(struct request_queue *q, struct blkcg_gq *blkg; struct blkg_policy_data *pd, *n; int cnt = 0, ret; + bool preloaded; if (blkcg_policy_enabled(q, pol)) return 0; + /* preallocations for root blkg */ + blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); + if (!blkg) + return -ENOMEM; + + preloaded = !radix_tree_preload(GFP_KERNEL); + blk_queue_bypass_start(q); /* make sure the root blkg exists and count the existing blkgs */ spin_lock_irq(q->queue_lock); rcu_read_lock(); - blkg = __blkg_lookup_create(&blkcg_root, q); + blkg = __blkg_lookup_create(&blkcg_root, q, blkg); rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); goto out_unlock; } q->root_blkg = blkg; + q->root_rl.blkg = blkg; list_for_each_entry(blkg, &q->blkg_list, q_node) cnt++; diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 8ac457ce7783..24597309e23d 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -17,6 +17,7 @@ #include <linux/u64_stats_sync.h> #include <linux/seq_file.h> #include <linux/radix-tree.h> +#include <linux/blkdev.h> /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX @@ -93,6 +94,8 @@ struct blkcg_gq { struct list_head q_node; struct hlist_node blkcg_node; struct blkcg *blkcg; + /* request allocation list for this blkcg-q pair */ + struct request_list rl; /* reference count */ int refcnt; @@ -120,8 +123,6 @@ struct blkcg_policy { extern struct blkcg blkcg_root; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup); -struct blkcg *bio_blkcg(struct bio *bio); struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); @@ -160,6 +161,25 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, void blkg_conf_finish(struct blkg_conf_ctx *ctx); +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *task_blkcg(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_css) + return container_of(bio->bi_css, struct blkcg, css); + return task_blkcg(current); +} + /** * 
blkg_to_pdata - get policy private data * @blkg: blkg of interest @@ -234,6 +254,95 @@ static inline void blkg_put(struct blkcg_gq *blkg) } /** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. + */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; + + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ + blkg = blkg_lookup_create(blkcg, q); + if (unlikely(IS_ERR(blkg))) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. + */ +static inline void blk_put_rl(struct request_list *rl) +{ + /* root_rl may not have blkg set */ + if (rl->blkg && rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. + */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. 
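The request_list helpers above — blk_get_rl(), blk_rq_set_rl(), blk_rq_rl() and blk_put_rl() — tie each allocated request back to the per-blkcg request_list it was taken from. A minimal sketch of the intended pairing, assuming the caller already holds q->queue_lock and passes a non-blocking gfp mask; the example_* functions are illustrative only and skip the batching, congestion and elevator handling done by the real __get_request()/freed_request() paths:

/* Illustrative allocation side: look up the right request_list for @bio,
 * take a request from its mempool and remember where it came from. */
static struct request *example_get_request(struct request_queue *q,
					   struct bio *bio, gfp_t gfp_mask)
{
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);		/* guaranteed non-NULL */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq) {
		blk_put_rl(rl);			/* drop the reference on failure */
		return NULL;
	}

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);			/* record the owning request_list */
	return rq;
}

/* Illustrative release side: recover the request_list and put it back. */
static void example_put_request(struct request *rq)
{
	struct request_list *rl = blk_rq_rl(rq);

	mempool_free(rq, rl->rq_pool);
	blk_put_rl(rl);
}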
+ */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + +/** * blkg_stat_add - add a value to a blkg_stat * @stat: target blkg_stat * @val: value to add @@ -351,6 +460,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) #else /* CONFIG_BLK_CGROUP */ struct cgroup; +struct blkcg; struct blkg_policy_data { }; @@ -361,8 +471,6 @@ struct blkcg_gq { struct blkcg_policy { }; -static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } -static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } static inline int blkcg_init_queue(struct request_queue *q) { return 0; } static inline void blkcg_drain_queue(struct request_queue *q) { } @@ -374,6 +482,9 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } @@ -381,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + #endif /* CONFIG_BLK_CGROUP */ #endif /* _BLK_CGROUP_H */ diff --git a/block/blk-core.c b/block/blk-core.c index 93eb3e4f88ce..4b4dbdfbca89 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (!list_empty(&q->queue_head) && q->request_fn) __blk_run_queue(q); - drain |= q->rq.elvpriv; + drain |= q->nr_rqs_elvpriv; /* * Unfortunately, requests are queued at and tracked from @@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (drain_all) { drain |= !list_empty(&q->queue_head); for (i = 0; i < 2; i++) { - drain |= q->rq.count[i]; + drain |= q->nr_rqs[i]; drain |= q->in_flight[i]; drain |= !list_empty(&q->flush_queue[i]); } @@ -416,9 +416,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) * left with hung waiters. We need to wake up those waiters. 
*/ if (q->request_fn) { + struct request_list *rl; + spin_lock_irq(q->queue_lock); - for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++) - wake_up_all(&q->rq.wait[i]); + + blk_queue_for_each_rl(rl, q) + for (i = 0; i < ARRAY_SIZE(rl->wait); i++) + wake_up_all(&rl->wait[i]); + spin_unlock_irq(q->queue_lock); } } @@ -517,28 +522,33 @@ void blk_cleanup_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(struct request_queue *q) +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask) { - struct request_list *rl = &q->rq; - if (unlikely(rl->rq_pool)) return 0; + rl->q = q; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; - rl->elvpriv = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, request_cachep, q->node); - + mempool_free_slab, request_cachep, + gfp_mask, q->node); if (!rl->rq_pool) return -ENOMEM; return 0; } +void blk_exit_rl(struct request_list *rl) +{ + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); +} + struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); @@ -680,7 +690,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_free_list(q)) + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) return NULL; q->request_fn = rfn; @@ -722,15 +732,15 @@ bool blk_get_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_get_queue); -static inline void blk_free_request(struct request_queue *q, struct request *rq) +static inline void blk_free_request(struct request_list *rl, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) { - elv_put_request(q, rq); + elv_put_request(rl->q, rq); if (rq->elv.icq) put_io_context(rq->elv.icq->ioc); } - mempool_free(rq, q->rq.rq_pool); + mempool_free(rq, rl->rq_pool); } /* @@ -767,18 +777,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) ioc->last_waited = jiffies; } -static void __freed_request(struct request_queue *q, int sync) +static void __freed_request(struct request_list *rl, int sync) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; - if (rl->count[sync] < queue_congestion_off_threshold(q)) + /* + * bdi isn't aware of blkcg yet. As all async IOs end up root + * blkcg anyway, just use root blkcg state. + */ + if (rl == &q->root_rl && + rl->count[sync] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, sync); if (rl->count[sync] + 1 <= q->nr_requests) { if (waitqueue_active(&rl->wait[sync])) wake_up(&rl->wait[sync]); - blk_clear_queue_full(q, sync); + blk_clear_rl_full(rl, sync); } } @@ -786,19 +801,20 @@ static void __freed_request(struct request_queue *q, int sync) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. 
*/ -static void freed_request(struct request_queue *q, unsigned int flags) +static void freed_request(struct request_list *rl, unsigned int flags) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; int sync = rw_is_sync(flags); + q->nr_rqs[sync]--; rl->count[sync]--; if (flags & REQ_ELVPRIV) - rl->elvpriv--; + q->nr_rqs_elvpriv--; - __freed_request(q, sync); + __freed_request(rl, sync); if (unlikely(rl->starved[sync ^ 1])) - __freed_request(q, sync ^ 1); + __freed_request(rl, sync ^ 1); } /* @@ -837,8 +853,8 @@ static struct io_context *rq_ioc(struct bio *bio) } /** - * get_request - get a free request - * @q: request_queue to allocate request from + * __get_request - get a free request + * @rl: request list to allocate from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) * @gfp_mask: allocation mask @@ -850,20 +866,16 @@ static struct io_context *rq_ioc(struct bio *bio) * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. */ -static struct request *get_request(struct request_queue *q, int rw_flags, - struct bio *bio, gfp_t gfp_mask) +static struct request *__get_request(struct request_list *rl, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { + struct request_queue *q = rl->q; struct request *rq; - struct request_list *rl = &q->rq; - struct elevator_type *et; - struct io_context *ioc; + struct elevator_type *et = q->elevator->type; + struct io_context *ioc = rq_ioc(bio); struct io_cq *icq = NULL; const bool is_sync = rw_is_sync(rw_flags) != 0; - bool retried = false; int may_queue; -retry: - et = q->elevator->type; - ioc = rq_ioc(bio); if (unlikely(blk_queue_dead(q))) return NULL; @@ -875,28 +887,14 @@ retry: if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[is_sync]+1 >= q->nr_requests) { /* - * We want ioc to record batching state. If it's - * not already there, creating a new one requires - * dropping queue_lock, which in turn requires - * retesting conditions to avoid queue hang. - */ - if (!ioc && !retried) { - spin_unlock_irq(q->queue_lock); - create_io_context(gfp_mask, q->node); - spin_lock_irq(q->queue_lock); - retried = true; - goto retry; - } - - /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". * This process will be allowed to complete a batch of * requests, others will be blocked. */ - if (!blk_queue_full(q, is_sync)) { + if (!blk_rl_full(rl, is_sync)) { ioc_set_batching(q, ioc); - blk_set_queue_full(q, is_sync); + blk_set_rl_full(rl, is_sync); } else { if (may_queue != ELV_MQUEUE_MUST && !ioc_batching(q, ioc)) { @@ -909,7 +907,12 @@ retry: } } } - blk_set_queue_congested(q, is_sync); + /* + * bdi isn't aware of blkcg yet. As all async IOs end up + * root blkcg anyway, just use root blkcg state. 
+ */ + if (rl == &q->root_rl) + blk_set_queue_congested(q, is_sync); } /* @@ -920,6 +923,7 @@ retry: if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) return NULL; + q->nr_rqs[is_sync]++; rl->count[is_sync]++; rl->starved[is_sync] = 0; @@ -935,7 +939,7 @@ retry: */ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { rw_flags |= REQ_ELVPRIV; - rl->elvpriv++; + q->nr_rqs_elvpriv++; if (et->icq_cache && ioc) icq = ioc_lookup_icq(ioc, q); } @@ -945,22 +949,19 @@ retry: spin_unlock_irq(q->queue_lock); /* allocate and init request */ - rq = mempool_alloc(q->rq.rq_pool, gfp_mask); + rq = mempool_alloc(rl->rq_pool, gfp_mask); if (!rq) goto fail_alloc; blk_rq_init(q, rq); + blk_rq_set_rl(rq, rl); rq->cmd_flags = rw_flags | REQ_ALLOCED; /* init elvpriv */ if (rw_flags & REQ_ELVPRIV) { if (unlikely(et->icq_cache && !icq)) { - create_io_context(gfp_mask, q->node); - ioc = rq_ioc(bio); - if (!ioc) - goto fail_elvpriv; - - icq = ioc_create_icq(ioc, q, gfp_mask); + if (ioc) + icq = ioc_create_icq(ioc, q, gfp_mask); if (!icq) goto fail_elvpriv; } @@ -1000,7 +1001,7 @@ fail_elvpriv: rq->elv.icq = NULL; spin_lock_irq(q->queue_lock); - rl->elvpriv--; + q->nr_rqs_elvpriv--; spin_unlock_irq(q->queue_lock); goto out; @@ -1013,7 +1014,7 @@ fail_alloc: * queue, but this is pretty rare. */ spin_lock_irq(q->queue_lock); - freed_request(q, rw_flags); + freed_request(rl, rw_flags); /* * in the very unlikely event that allocation failed and no @@ -1029,56 +1030,58 @@ rq_starved: } /** - * get_request_wait - get a free request with retry + * get_request - get a free request * @q: request_queue to allocate request from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask * - * Get a free request from @q. This function keeps retrying under memory - * pressure and fails iff @q is dead. + * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this + * function keeps retrying under memory pressure and fails iff @q is dead. * * Must be callled with @q->queue_lock held and, * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. */ -static struct request *get_request_wait(struct request_queue *q, int rw_flags, - struct bio *bio) +static struct request *get_request(struct request_queue *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { const bool is_sync = rw_is_sync(rw_flags) != 0; + DEFINE_WAIT(wait); + struct request_list *rl; struct request *rq; - rq = get_request(q, rw_flags, bio, GFP_NOIO); - while (!rq) { - DEFINE_WAIT(wait); - struct request_list *rl = &q->rq; - - if (unlikely(blk_queue_dead(q))) - return NULL; + rl = blk_get_rl(q, bio); /* transferred to @rq on success */ +retry: + rq = __get_request(rl, rw_flags, bio, gfp_mask); + if (rq) + return rq; - prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, - TASK_UNINTERRUPTIBLE); + if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) { + blk_put_rl(rl); + return NULL; + } - trace_block_sleeprq(q, bio, rw_flags & 1); + /* wait on @rl and retry */ + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, + TASK_UNINTERRUPTIBLE); - spin_unlock_irq(q->queue_lock); - io_schedule(); + trace_block_sleeprq(q, bio, rw_flags & 1); - /* - * After sleeping, we become a "batching" process and - * will be able to allocate at least one request, and - * up to a big batch of them for a small period time. 
- * See ioc_batching, ioc_set_batching - */ - create_io_context(GFP_NOIO, q->node); - ioc_set_batching(q, current->io_context); + spin_unlock_irq(q->queue_lock); + io_schedule(); - spin_lock_irq(q->queue_lock); - finish_wait(&rl->wait[is_sync], &wait); + /* + * After sleeping, we become a "batching" process and will be able + * to allocate at least one request, and up to a big batch of them + * for a small period time. See ioc_batching, ioc_set_batching + */ + ioc_set_batching(q, current->io_context); - rq = get_request(q, rw_flags, bio, GFP_NOIO); - }; + spin_lock_irq(q->queue_lock); + finish_wait(&rl->wait[is_sync], &wait); - return rq; + goto retry; } struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) @@ -1087,11 +1090,11 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) BUG_ON(rw != READ && rw != WRITE); + /* create ioc upfront */ + create_io_context(gfp_mask, q->node); + spin_lock_irq(q->queue_lock); - if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw, NULL); - else - rq = get_request(q, rw, NULL, gfp_mask); + rq = get_request(q, rw, NULL, gfp_mask); if (!rq) spin_unlock_irq(q->queue_lock); /* q->queue_lock is unlocked at this point */ @@ -1248,12 +1251,14 @@ void __blk_put_request(struct request_queue *q, struct request *req) */ if (req->cmd_flags & REQ_ALLOCED) { unsigned int flags = req->cmd_flags; + struct request_list *rl = blk_rq_rl(req); BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!hlist_unhashed(&req->hash)); - blk_free_request(q, req); - freed_request(q, flags); + blk_free_request(rl, req); + freed_request(rl, flags); + blk_put_rl(rl); } } EXPORT_SYMBOL_GPL(__blk_put_request); @@ -1481,7 +1486,7 @@ get_rq: * Grab a free request. This is might sleep but can not fail. * Returns with the queue unlocked. */ - req = get_request_wait(q, rw_flags, bio); + req = get_request(q, rw_flags, bio, GFP_NOIO); if (unlikely(!req)) { bio_endio(bio, -ENODEV); /* @q is dead */ goto out_unlock; @@ -1702,6 +1707,14 @@ generic_make_request_checks(struct bio *bio) goto end_io; } + /* + * Various block parts want %current->io_context and lazy ioc + * allocation ends up trading a lot of pain for a small amount of + * memory. Just allocate it upfront. This may fail and block + * layer knows how to live with it. 
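With get_request_wait() folded into get_request(), whether an allocation sleeps is now decided purely by the gfp mask the caller hands to blk_get_request(): with __GFP_WAIT the allocator keeps retrying under memory pressure and only gives up once the queue is dead, without it the call is a single attempt. A short sketch of both modes from a caller's point of view; the example_* helper is hypothetical and only the blk_get_request()/blk_put_request() semantics are taken from the change above:

static int example_probe_queue(struct request_queue *q)
{
	struct request *rq;

	/* __GFP_WAIT set: retries under memory pressure, returns NULL
	 * only once @q has been marked dead. */
	rq = blk_get_request(q, READ, GFP_NOIO);
	if (!rq)
		return -ENODEV;
	blk_put_request(rq);

	/* No __GFP_WAIT: a single attempt, so NULL just means "nothing
	 * available right now, try again later". */
	rq = blk_get_request(q, READ, GFP_ATOMIC);
	if (!rq)
		return -EBUSY;
	blk_put_request(rq);

	return 0;
}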
+ */ + create_io_context(GFP_ATOMIC, q->node); + if (blk_throtl_bio(q, bio)) return false; /* throttled, will be resubmitted later */ @@ -2896,23 +2909,47 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, } -static void flush_plug_callbacks(struct blk_plug *plug) +static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) { LIST_HEAD(callbacks); - if (list_empty(&plug->cb_list)) - return; + while (!list_empty(&plug->cb_list)) { + list_splice_init(&plug->cb_list, &callbacks); - list_splice_init(&plug->cb_list, &callbacks); - - while (!list_empty(&callbacks)) { - struct blk_plug_cb *cb = list_first_entry(&callbacks, + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, struct blk_plug_cb, list); - list_del(&cb->list); - cb->callback(cb); + list_del(&cb->list); + cb->callback(cb, from_schedule); + } + } +} + +struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, + int size) +{ + struct blk_plug *plug = current->plug; + struct blk_plug_cb *cb; + + if (!plug) + return NULL; + + list_for_each_entry(cb, &plug->cb_list, list) + if (cb->callback == unplug && cb->data == data) + return cb; + + /* Not currently on the callback list */ + BUG_ON(size < sizeof(*cb)); + cb = kzalloc(size, GFP_ATOMIC); + if (cb) { + cb->data = data; + cb->callback = unplug; + list_add(&cb->list, &plug->cb_list); } + return cb; } +EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { @@ -2924,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) BUG_ON(plug->magic != PLUG_MAGIC); - flush_plug_callbacks(plug); + flush_plug_callbacks(plug, from_schedule); if (list_empty(&plug->list)) return; diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 893b8007c657..fab4cdd3f7bb 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -244,6 +244,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) /* initialize */ atomic_long_set(&ioc->refcount, 1); + atomic_set(&ioc->nr_tasks, 1); atomic_set(&ioc->active_ref, 1); spin_lock_init(&ioc->lock); INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH); diff --git a/block/blk-settings.c b/block/blk-settings.c index d3234fc494ad..565a6786032f 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -143,8 +143,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) lim->discard_zeroes_data = 1; lim->max_segments = USHRT_MAX; lim->max_hw_sectors = UINT_MAX; - - lim->max_sectors = BLK_DEF_MAX_SECTORS; + lim->max_sectors = UINT_MAX; } EXPORT_SYMBOL(blk_set_stacking_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index aa41b47c22d2..9628b291f960 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl = &q->rq; + struct request_list *rl; unsigned long nr; int ret; @@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) q->nr_requests = nr; blk_queue_congestion_threshold(q); + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) blk_set_queue_congested(q, BLK_RW_SYNC); else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) @@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t 
count) else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, BLK_RW_ASYNC); - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_SYNC); - } else { - blk_clear_queue_full(q, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } } - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_ASYNC); - } else { - blk_clear_queue_full(q, BLK_RW_ASYNC); - wake_up(&rl->wait[BLK_RW_ASYNC]); - } spin_unlock_irq(q->queue_lock); return ret; } @@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); - struct request_list *rl = &q->rq; blk_sync_queue(q); @@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj) elevator_exit(q->elevator); } - if (rl->rq_pool) - mempool_destroy(rl->rq_pool); + blk_exit_rl(&q->root_rl); if (q->queue_tags) __blk_queue_free_tags(q); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 5b0659512047..e287c19908c8 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) goto out; } - /* bio_associate_current() needs ioc, try creating */ - create_io_context(GFP_ATOMIC, q->node); - /* * A throtl_grp pointer retrieved under rcu can be used to access * basic fields like stats and io rates. If a group has no rules, diff --git a/block/blk.h b/block/blk.h index 85f6ae42f7d3..2a0ea32d249f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask); +void blk_exit_rl(struct request_list *rl); void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); @@ -33,7 +36,6 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_delete_timer(struct request *); void blk_add_timer(struct request *); -void __generic_unplug_device(struct request_queue *); /* * Internal atomic flags for request handling diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 7ad49c88f6b1..deee61fbb741 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q, return 0; } EXPORT_SYMBOL_GPL(bsg_setup_queue); - -/** - * bsg_remove_queue - Deletes the bsg dev from the q - * @q: the request_queue that is to be torn down. 
- * - * Notes: - * Before unregistering the queue empty any requests that are blocked - */ -void bsg_remove_queue(struct request_queue *q) -{ - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - - if (!q) - return; - - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. - */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); -} -EXPORT_SYMBOL_GPL(bsg_remove_queue); diff --git a/block/genhd.c b/block/genhd.c index 9cf5583c90ff..cac7366957c3 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -154,7 +154,7 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; - if (!part->nr_sects && + if (!part_nr_sects_read(part) && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && piter->idx == 0)) @@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit); static inline int sector_in_part(struct hd_struct *part, sector_t sector) { return part->start_sect <= sector && - sector < part->start_sect + part->nr_sects; + sector < part->start_sect + part_nr_sects_read(part); } /** @@ -769,8 +769,8 @@ void __init printk_all_partitions(void) printk("%s%s %10llu %s %s", is_part0 ? "" : " ", bdevt_str(part_devt(part), devt_buf), - (unsigned long long)part->nr_sects >> 1, - disk_name(disk, part->partno, name_buf), + (unsigned long long)part_nr_sects_read(part) >> 1 + , disk_name(disk, part->partno, name_buf), uuid_buf); if (is_part0) { if (disk->driverfs_dev != NULL && @@ -862,7 +862,7 @@ static int show_partition(struct seq_file *seqf, void *v) while ((part = disk_part_iter_next(&piter))) seq_printf(seqf, "%4d %7d %10llu %s\n", MAJOR(part_devt(part)), MINOR(part_devt(part)), - (unsigned long long)part->nr_sects >> 1, + (unsigned long long)part_nr_sects_read(part) >> 1, disk_name(sgp, part->partno, buf)); disk_part_iter_exit(&piter); @@ -1268,6 +1268,16 @@ struct gendisk *alloc_disk_node(int minors, int node_id) } disk->part_tbl->part[0] = &disk->part0; + /* + * set_capacity() and get_capacity() currently don't use + * seqcounter to read/update the part0->nr_sects. Still init + * the counter as we can read the sectors in IO submission + * patch using seqence counters. + * + * TODO: Ideally set_capacity() and get_capacity() should be + * converted to make use of bd_mutex and sequence counters. 
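The nr_sects_seq counter initialised just below is what lets the part_nr_sects_read()/part_nr_sects_write() helpers used in the genhd.c and ioctl.c hunks return a torn-read-free view of a partition's size on 32-bit machines. A rough sketch of the seqcount pattern they rely on, using the standard <linux/seqlock.h> API; the example_* names are illustrative, and the real helpers may skip the retry loop entirely on configurations where the sector_t load is already atomic:

/* Reader: retry until a consistent snapshot of nr_sects is observed. */
static inline sector_t example_nr_sects_read(struct hd_struct *part)
{
	sector_t nr_sects;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&part->nr_sects_seq);
		nr_sects = part->nr_sects;
	} while (read_seqcount_retry(&part->nr_sects_seq, seq));

	return nr_sects;
}

/* Writer: bump the sequence around the update so readers can detect
 * that they raced with a resize (the resize paths hold bd_mutex). */
static inline void example_nr_sects_write(struct hd_struct *part, sector_t size)
{
	write_seqcount_begin(&part->nr_sects_seq);
	part->nr_sects = size;
	write_seqcount_end(&part->nr_sects_seq);
}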
+ */ + seqcount_init(&disk->part0.nr_sects_seq); hd_ref_init(&disk->part0); disk->minors = minors; diff --git a/block/ioctl.c b/block/ioctl.c index ba15b2dbfb98..4476e0e85d16 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -13,7 +13,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user { struct block_device *bdevp; struct gendisk *disk; - struct hd_struct *part; + struct hd_struct *part, *lpart; struct blkpg_ioctl_arg a; struct blkpg_partition p; struct disk_part_iter piter; @@ -36,8 +36,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user case BLKPG_ADD_PARTITION: start = p.start >> 9; length = p.length >> 9; - /* check for fit in a hd_struct */ - if (sizeof(sector_t) == sizeof(long) && + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && sizeof(long long) > sizeof(long)) { long pstart = start, plength = length; if (pstart != start || plength != length @@ -92,6 +92,59 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user bdput(bdevp); return 0; + case BLKPG_RESIZE_PARTITION: + start = p.start >> 9; + /* new length of partition in bytes */ + length = p.length >> 9; + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && + sizeof(long long) > sizeof(long)) { + long pstart = start, plength = length; + if (pstart != start || plength != length + || pstart < 0 || plength < 0) + return -EINVAL; + } + part = disk_get_part(disk, partno); + if (!part) + return -ENXIO; + bdevp = bdget(part_devt(part)); + if (!bdevp) { + disk_put_part(part); + return -ENOMEM; + } + mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); + if (start != part->start_sect) { + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EINVAL; + } + /* overlap? 
*/ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY); + while ((lpart = disk_part_iter_next(&piter))) { + if (lpart->partno != partno && + !(start + length <= lpart->start_sect || + start >= lpart->start_sect + lpart->nr_sects) + ) { + disk_part_iter_exit(&piter); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EBUSY; + } + } + disk_part_iter_exit(&piter); + part_nr_sects_write(part, (sector_t)length); + i_size_write(bdevp->bd_inode, p.length); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return 0; default: return -EINVAL; } diff --git a/block/partition-generic.c b/block/partition-generic.c index 6df5d6928a44..f1d14519cc04 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -84,7 +84,7 @@ ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); + return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p)); } static ssize_t part_ro_show(struct device *dev, @@ -294,6 +294,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, err = -ENOMEM; goto out_free; } + + seqcount_init(&p->nr_sects_seq); pdev = part_to_dev(p); p->start_sect = start; diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index ac7034129f3f..d5fdd36190cc 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ac_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); static struct acpi_driver acpi_ac_driver = { @@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device) return result; } +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev) { struct acpi_ac *ac; @@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev) kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); return 0; } +#endif static int acpi_ac_remove(struct acpi_device *device, int type) { diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 5ccb99ae3a6f..5de4ec72766d 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void); /* * hwsleep - sleep/wake support (Legacy sleep registers) */ -acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_sleep(u8 sleep_state); -acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state); -acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_wake(u8 sleep_state); /* * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers) */ void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument); -acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_sleep(u8 sleep_state); -acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_wake_prep(u8 sleep_state); -acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_wake(u8 sleep_state); /* * hwvalid - Port I/O with validation diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index 48518dac5342..94996f9ae3ad 100644 --- 
a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) * FUNCTION: acpi_hw_extended_sleep * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) * ******************************************************************************/ -acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_sleep(u8 sleep_state) { acpi_status status; u8 sleep_type_value; @@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) acpi_gbl_system_awake_and_running = FALSE; - /* Optionally execute _GTS (Going To Sleep) */ - - if (flags & ACPI_EXECUTE_GTS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); - } - /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); @@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_extended_wake_prep * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * @@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_wake_prep(u8 sleep_state) { acpi_status status; u8 sleep_type_value; @@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) &acpi_gbl_FADT.sleep_control); } - /* Optionally execute _BFS (Back From Sleep) */ - - if (flags & ACPI_EXECUTE_BFS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); - } return_ACPI_STATUS(AE_OK); } @@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_wake(u8 sleep_state) { ACPI_FUNCTION_TRACE(hw_extended_wake); diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index 9960fe9ef533..3fddde056a5e 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep") * FUNCTION: acpi_hw_legacy_sleep * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep") * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ -acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_sleep(u8 sleep_state) { struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; @@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) return_ACPI_STATUS(status); } - /* Optionally execute _GTS (Going To Sleep) */ - - if (flags & ACPI_EXECUTE_GTS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); - } - /* Get current value of PM1A control */ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, @@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_legacy_wake_prep * * PARAMETERS: sleep_state - Which sleep 
state we just exited - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * @@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) { acpi_status status; struct acpi_bit_register_info *sleep_type_reg_info; @@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) } } - /* Optionally execute _BFS (Back From Sleep) */ - - if (flags & ACPI_EXECUTE_BFS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); - } return_ACPI_STATUS(status); } @@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_legacy_wake * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - Reserved, set to zero * * RETURN: Status * @@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_wake(u8 sleep_state) { acpi_status status; diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index f8684bfe7907..1f165a750ae2 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id); +acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); /* * Dispatch table used to efficiently branch to the various sleep @@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) * ******************************************************************************/ static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) +acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) { acpi_status status; struct acpi_sleep_functions *sleep_functions = @@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) * use the extended sleep registers */ if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { - status = sleep_functions->extended_function(sleep_state, flags); + status = sleep_functions->extended_function(sleep_state); } else { /* Legacy sleep */ - status = sleep_functions->legacy_function(sleep_state, flags); + status = sleep_functions->legacy_function(sleep_state); } return (status); @@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) * For the case where reduced-hardware-only code is being generated, * we know that only the extended sleep registers are available */ - status = sleep_functions->extended_function(sleep_state, flags); + status = sleep_functions->extended_function(sleep_state); return (status); #endif /* !ACPI_REDUCED_HARDWARE */ @@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) * FUNCTION: acpi_enter_sleep_state * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ -acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) +acpi_status asmlinkage acpi_enter_sleep_state(u8 
sleep_state) { acpi_status status; @@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) } status = - acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID); + acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state) * Called with interrupts DISABLED. * ******************************************************************************/ -acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags) +acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); status = - acpi_hw_sleep_dispatch(sleep_state, flags, + acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); - - status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID); + status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index ff2c876ec412..45e3e1759fb8 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP /* this is needed to learn about changes made in suspended state */ static int acpi_battery_resume(struct device *dev) { @@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev) acpi_battery_update(battery); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 79d4c22f7a6d..314a3b84bbc7 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device); static int acpi_button_remove(struct acpi_device *device, int type); static void acpi_button_notify(struct acpi_device *device, u32 event); +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); static struct acpi_driver acpi_button_driver = { @@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev) { struct acpi_device *device = to_acpi_device(dev); @@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev) return acpi_lid_send_state(device); return 0; } +#endif static int acpi_button_add(struct acpi_device *device) { diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 669d9ee80d16..bc36a476f1ab 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev); static int acpi_fan_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); static struct acpi_driver acpi_fan_driver = { @@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev) { if (!dev) @@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev) return result; } +#endif static int __init acpi_fan_init(void) { diff --git a/drivers/acpi/numa.c 
b/drivers/acpi/numa.c index e56f3be7b07d..cb31298ca684 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, return 0; } +static int __initdata parsed_numa_memblks; + static int __init acpi_parse_memory_affinity(struct acpi_subtable_header * header, const unsigned long end) @@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, acpi_table_print_srat_entry(header); /* let architecture-dependent part to do it */ - acpi_numa_memory_affinity_init(memory_affinity); - + if (!acpi_numa_memory_affinity_init(memory_affinity)) + parsed_numa_memblks++; return 0; } @@ -304,8 +306,10 @@ int __init acpi_numa_init(void) acpi_numa_arch_fixup(); - if (cnt <= 0) - return cnt ?: -ENOENT; + if (cnt < 0) + return cnt; + else if (!parsed_numa_memblks) + return -ENOENT; return 0; } diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index ec54014c321c..72a2c98bc429 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) OSC_CLOCK_PWR_CAPABILITY_SUPPORT; if (pci_msi_enabled()) flags |= OSC_MSI_SUPPORT; - if (flags != base_flags) - acpi_pci_osc_support(root, flags); + if (flags != base_flags) { + status = acpi_pci_osc_support(root, flags); + if (ACPI_FAILURE(status)) { + dev_info(root->bus->bridge, "ACPI _OSC support " + "notification failed, disabling PCIe ASPM\n"); + pcie_no_aspm(); + flags = base_flags; + } + } if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 215ecd097408..fc1803414629 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, power_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume); static struct acpi_driver acpi_power_driver = { @@ -775,6 +777,7 @@ static int acpi_power_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev) { int result = 0, state; @@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev) return result; } +#endif int __init acpi_power_init(void) { diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index ff8e04f2fab4..bfc31cb0dd3e 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, /* Normal CPU soft online event */ } else { acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_cst_has_changed(pr); + acpi_processor_hotplug(pr); acpi_processor_reevaluate_tstate(pr, action); acpi_processor_tstate_has_changed(pr); } diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index c0b9aa5faf4c..ff0740e0a9c2 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void) #endif } +#ifdef CONFIG_PM_SLEEP static int acpi_sbs_resume(struct device *dev) { struct acpi_sbs *sbs; @@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev) acpi_sbs_callback(sbs); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 7a7a9c929247..fdcdbb652915 100644 --- a/drivers/acpi/sleep.c +++ 
b/drivers/acpi/sleep.c @@ -28,36 +28,7 @@ #include "internal.h" #include "sleep.h" -u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS; -static unsigned int gts, bfs; -static int set_param_wake_flag(const char *val, struct kernel_param *kp) -{ - int ret = param_set_int(val, kp); - - if (ret) - return ret; - - if (kp->arg == (const char *)&gts) { - if (gts) - wake_sleep_flags |= ACPI_EXECUTE_GTS; - else - wake_sleep_flags &= ~ACPI_EXECUTE_GTS; - } - if (kp->arg == (const char *)&bfs) { - if (bfs) - wake_sleep_flags |= ACPI_EXECUTE_BFS; - else - wake_sleep_flags &= ~ACPI_EXECUTE_BFS; - } - return ret; -} -module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644); -module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644); -MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); -MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); - static u8 sleep_states[ACPI_S_STATE_COUNT]; -static bool pwr_btn_event_pending; static void acpi_sleep_tts_switch(u32 acpi_state) { @@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state) #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; +static bool pwr_btn_event_pending; /* * The ACPI specification wants us to save NVS memory regions during hibernation @@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) switch (acpi_state) { case ACPI_STATE_S1: barrier(); - status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); + status = acpi_enter_sleep_state(acpi_state); break; case ACPI_STATE_S3: @@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) /* This violates the spec but is required for bug compatibility. */ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(acpi_state); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the @@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void) ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ - status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); + status = acpi_enter_sleep_state(ACPI_STATE_S4); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(ACPI_STATE_S4); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } @@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void) * enable it here. */ acpi_enable(); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(ACPI_STATE_S4); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) { printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " @@ -892,33 +864,7 @@ static void acpi_power_off(void) /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); - acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); -} - -/* - * ACPI 2.0 created the optional _GTS and _BFS, - * but industry adoption has been neither rapid nor broad. 
- * - * Linux gets into trouble when it executes poorly validated - * paths through the BIOS, so disable _GTS and _BFS by default, - * but do speak up and offer the option to enable them. - */ -static void __init acpi_gts_bfs_check(void) -{ - acpi_handle dummy; - - if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy))) - { - printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n"); - printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, " - "please notify linux-acpi@vger.kernel.org\n"); - } - if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy))) - { - printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n"); - printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, " - "please notify linux-acpi@vger.kernel.org\n"); - } + acpi_enter_sleep_state(ACPI_STATE_S5); } int __init acpi_sleep_init(void) @@ -979,6 +925,5 @@ int __init acpi_sleep_init(void) * object can also be evaluated when the system enters S5. */ register_reboot_notifier(&tts_notifier); - acpi_gts_bfs_check(); return 0; } diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 240a24400976..7c3f98ba4afe 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) { int result = 0; - if (!strncmp(val, "enable", strlen("enable"))) { + if (!strncmp(val, "enable", sizeof("enable") - 1)) { result = acpi_debug_trace(trace_method_name, trace_debug_level, trace_debug_layer, 0); if (result) @@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) goto exit; } - if (!strncmp(val, "disable", strlen("disable"))) { + if (!strncmp(val, "disable", sizeof("disable") - 1)) { int name = 0; result = acpi_debug_trace((char *)&name, trace_debug_level, trace_debug_layer, 0); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 9fe90e9fecb5..edda74a43406 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, thermal_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); static struct acpi_driver acpi_thermal_driver = { @@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev) { struct acpi_thermal *tz; @@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev) return AE_OK; } +#endif static int thermal_act(const struct dmi_system_id *d) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 24712adf69df..311be18d3f03 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -65,6 +65,8 @@ #include <linux/mbus.h> #include <linux/bitops.h> #include <linux/gfp.h> +#include <linux/of.h> +#include <linux/of_irq.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -4026,7 +4028,7 @@ static int mv_platform_probe(struct platform_device *pdev) struct ata_host *host; struct mv_host_priv *hpriv; struct resource *res; - int n_ports = 0; + int n_ports = 0, irq = 0; int rc; #if defined(CONFIG_HAVE_CLK) int port; @@ -4050,8 +4052,14 @@ static int mv_platform_probe(struct platform_device *pdev) return -EINVAL; /* allocate host */ - mv_platform_data = pdev->dev.platform_data; - n_ports = mv_platform_data->n_ports; + if (pdev->dev.of_node) { + 
of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports); + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + } else { + mv_platform_data = pdev->dev.platform_data; + n_ports = mv_platform_data->n_ports; + irq = platform_get_irq(pdev, 0); + } host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); @@ -4109,8 +4117,7 @@ static int mv_platform_probe(struct platform_device *pdev) dev_info(&pdev->dev, "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, host->n_ports); - rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, - IRQF_SHARED, &mv6_sht); + rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht); if (!rc) return 0; @@ -4205,15 +4212,24 @@ static int mv_platform_resume(struct platform_device *pdev) #define mv_platform_resume NULL #endif +#ifdef CONFIG_OF +static struct of_device_id mv_sata_dt_ids[] __devinitdata = { + { .compatible = "marvell,orion-sata", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_sata_dt_ids); +#endif + static struct platform_driver mv_platform_driver = { - .probe = mv_platform_probe, - .remove = __devexit_p(mv_platform_remove), - .suspend = mv_platform_suspend, - .resume = mv_platform_resume, - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, + .probe = mv_platform_probe, + .remove = __devexit_p(mv_platform_remove), + .suspend = mv_platform_suspend, + .resume = mv_platform_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mv_sata_dt_ids), + }, }; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index d4386019af5d..96cce6d53195 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev) { printk(DEV_LABEL " (itf %d): can't set up page mapping\n", dev->number); - return error; + return -ENOMEM; } IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", dev->number, iadev->pci->revision, base, iadev->irq);) diff --git a/drivers/base/core.c b/drivers/base/core.c index f338037a4f3d..5e6e00bc1652 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1865,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) { char dict[128]; + const char *level_extra = ""; size_t dictlen = 0; const char *subsys; @@ -1911,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev, "DEVICE=+%s:%s", subsys, dev_name(dev)); } skip: + if (level[2]) + level_extra = &level[2]; /* skip past KERN_SOH "L" */ + return printk_emit(0, level[1] - '0', dictlen ? 
dict : NULL, dictlen, - "%s %s: %pV", - dev_driver_string(dev), dev_name(dev), vaf); + "%s %s: %s%pV", + dev_driver_string(dev), dev_name(dev), + level_extra, vaf); } EXPORT_SYMBOL(__dev_printk); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index d91a3a0b2325..deb4a456cf83 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -156,9 +156,7 @@ static int dev_mkdir(const char *name, umode_t mode) if (!err) /* mark as kernel-created inode */ dentry->d_inode->i_private = &thread; - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + done_path_create(&path, dentry); return err; } @@ -218,10 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev) /* mark as kernel-created inode */ dentry->d_inode->i_private = &thread; } - dput(dentry); - - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + done_path_create(&path, dentry); return err; } diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 869d7ff2227f..eb78e9640c4a 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev) */ int pm_clk_create(struct device *dev) { - int ret = dev_pm_get_subsys_data(dev); - return ret < 0 ? ret : 0; + return dev_pm_get_subsys_data(dev); } /** diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index a14085cc613f..39c32529b833 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -24,7 +24,6 @@ int dev_pm_get_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; psd = kzalloc(sizeof(*psd), GFP_KERNEL); if (!psd) @@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev) dev->power.subsys_data = psd; pm_clk_init(dev); psd = NULL; - ret = 1; } spin_unlock_irq(&dev->power.lock); @@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev) /* kfree() verifies that its argument is nonzero. */ kfree(psd); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 59894873a3b3..7d9c1cb1c39a 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev) || (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; + else if (__dev_pm_qos_read_value(dev) < 0) + retval = -EPERM; else if (dev->power.runtime_status == RPM_SUSPENDED) retval = 1; @@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto repeat; } - dev->power.deferred_resume = false; if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ @@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } - if (__dev_pm_qos_read_value(dev) < 0) { - /* Negative PM QoS constraint means "never suspend". */ - retval = -EPERM; - goto out; - } - __update_runtime_status(dev, RPM_SUSPENDING); if (dev->pm_domain) @@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) wake_up_all(&dev->power.wait_queue); if (dev->power.deferred_resume) { + dev->power.deferred_resume = false; rpm_resume(dev, 0); retval = -EAGAIN; goto out; @@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags) || dev->parent->power.runtime_status == RPM_ACTIVE) { atomic_inc(&dev->parent->power.child_count); spin_unlock(&dev->parent->power.lock); + retval = 1; goto no_callback; /* Assume success. 
*/ } spin_unlock(&dev->parent->power.lock); @@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags) } wake_up_all(&dev->power.wait_queue); - if (!retval) + if (retval >= 0) rpm_idle(dev, RPM_ASYNC); out: diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 11b32d2642df..a6e5672c67e7 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { 0, }, }; diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index 26823d97fd9f..9ea4627dc0c2 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) /* for these chips OTP is always available */ present = true; break; - + case BCMA_CHIP_ID_BCM43228: + present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT; + break; default: present = false; break; diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index acda773b3720..38aa6dda6b81 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, { case CMD_TARGET_STATUS: /* Pass it up to the upper layers... */ - if( ei->ScsiStatus) - { -#if 0 - printk(KERN_WARNING "cciss: cmd %p " - "has SCSI Status = %x\n", - c, ei->ScsiStatus); -#endif - cmd->result |= (ei->ScsiStatus << 1); - } - else { /* scsi status is zero??? How??? */ + if (!ei->ScsiStatus) { /* Ordinarily, this case should never happen, but there is a bug in some released firmware revisions that allows it to happen diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index e54e31b02b88..3fbef018ce55 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) + mdev->ldev->md.al_offset + mdev->al_tr_pos; if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) @@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, unsigned int enr, count = 0; struct lc_element *e; - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { + /* this should be an empty REQ_FLUSH */ + if (size == 0) + return 0; + + if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "sector: %llus, size: %d\n", (unsigned long long)sector, size); return 0; diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index fcb956bb4b4c..ba91b408abad 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w if (ctx->error) { dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); err = -EIO; /* ctx->error ? 
*/ } @@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done); if (ctx->error) - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); /* that should force detach, so the in memory bitmap will be * gone in a moment as well. */ diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 02f013a073a7..b2ca143d0053 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -813,7 +813,6 @@ enum { SIGNAL_ASENDER, /* whether asender wants to be interrupted */ SEND_PING, /* whether asender should send a ping asap */ - UNPLUG_QUEUED, /* only relevant with kernel 2.4 */ UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ MD_DIRTY, /* current uuids and flags not yet on disk */ DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */ @@ -824,7 +823,6 @@ enum { CRASHED_PRIMARY, /* This node was a crashed primary. * Gets cleared when the state.conn * goes into C_CONNECTED state. */ - NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ CONSIDER_RESYNC, MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ @@ -834,6 +832,7 @@ enum { BITMAP_IO_QUEUED, /* Started bitmap IO */ GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ WAS_IO_ERROR, /* Local disk failed returned IO error */ + FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */ RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ NET_CONGESTED, /* The data socket is congested */ @@ -851,6 +850,13 @@ enum { AL_SUSPENDED, /* Activity logging is currently suspended. */ AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ STATE_SENT, /* Do not change state/UUIDs while this is set */ + + CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC) + * pending, from drbd worker context. + * If set, bdi_write_congested() returns true, + * so shrink_page_list() would not recurse into, + * and potentially deadlock on, this drbd worker. + */ }; struct drbd_bitmap; /* opaque for drbd_conf */ @@ -1130,8 +1136,8 @@ struct drbd_conf { int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ int rs_planed; /* resync sectors already planned */ atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ - int peer_max_bio_size; - int local_max_bio_size; + unsigned int peer_max_bio_size; + unsigned int local_max_bio_size; }; static inline struct drbd_conf *minor_to_mdev(unsigned int minor) @@ -1435,9 +1441,9 @@ struct bm_extent { * hash table. 
*/ #define HT_SHIFT 8 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) -#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */ +#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ -#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ +#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */ /* Number of elements in the app_reads_hash */ #define APP_R_HSIZE 15 @@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev, return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED); } +enum drbd_force_detach_flags { + DRBD_IO_ERROR, + DRBD_META_IO_ERROR, + DRBD_FORCE_DETACH, +}; + #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) -static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where) +static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, + enum drbd_force_detach_flags forcedetach, + const char *where) { switch (mdev->ldev->dc.on_io_error) { case EP_PASS_ON: - if (!forcedetach) { + if (forcedetach == DRBD_IO_ERROR) { if (__ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Local IO failed in %s.\n", where); if (mdev->state.disk > D_INCONSISTENT) @@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, case EP_DETACH: case EP_CALL_HELPER: set_bit(WAS_IO_ERROR, &mdev->flags); + if (forcedetach == DRBD_FORCE_DETACH) + set_bit(FORCE_DETACH, &mdev->flags); if (mdev->state.disk > D_FAILED) { _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); dev_err(DEV, @@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, */ #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) static inline void drbd_chk_io_error_(struct drbd_conf *mdev, - int error, int forcedetach, const char *where) + int error, enum drbd_force_detach_flags forcedetach, const char *where) { if (error) { unsigned long flags; @@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev) int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); D_ASSERT(ap_bio >= 0); + + if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { + if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) + drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); + } + /* this currently does wake_up for every dec_ap_bio! * maybe rather introduce some type of hysteresis? * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */ if (ap_bio < mxb) wake_up(&mdev->misc_wait); - if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { - if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) - drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); - } } static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 920ede2829d6..dbe6135a2abe 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* Do not change the order of the if above and the two below... */ if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ + /* we probably will start a resync soon. + * make sure those things are properly reset. 
*/ + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); + drbd_rs_cancel_all(mdev); + drbd_send_uuids(mdev); drbd_send_state(mdev, ns); } @@ -1630,9 +1637,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, eh = mdev->ldev->dc.on_io_error; was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags); - /* Immediately allow completion of all application IO, that waits - for completion from the local disk. */ - tl_abort_disk_io(mdev); + if (was_io_error && eh == EP_CALL_HELPER) + drbd_khelper(mdev, "local-io-error"); + + /* Immediately allow completion of all application IO, + * that waits for completion from the local disk, + * if this was a force-detach due to disk_timeout + * or administrator request (drbdsetup detach --force). + * Do NOT abort otherwise. + * Aborting local requests may cause serious problems, + * if requests are completed to upper layers already, + * and then later the already submitted local bio completes. + * This can cause DMA into former bio pages that meanwhile + * have been re-used for other things. + * So aborting local requests may cause crashes, + * or even worse, silent data corruption. + */ + if (test_and_clear_bit(FORCE_DETACH, &mdev->flags)) + tl_abort_disk_io(mdev); /* current state still has to be D_FAILED, * there is only one way out: to D_DISKLESS, @@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, drbd_md_sync(mdev); } put_ldev(mdev); - - if (was_io_error && eh == EP_CALL_HELPER) - drbd_khelper(mdev, "local-io-error"); } /* second half of local IO error, failure to attach, @@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, "ASSERT FAILED: disk is %s while going diskless\n", drbd_disk_str(mdev->state.disk)); - mdev->rs_total = 0; - mdev->rs_failed = 0; - atomic_set(&mdev->rs_pending_cnt, 0); - if (ns.conn >= C_CONNECTED) drbd_send_state(mdev, ns); @@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl { struct p_sizes p; sector_t d_size, u_size; - int q_order_type, max_bio_size; + int q_order_type; + unsigned int max_bio_size; int ok; if (get_ldev_if_state(mdev, D_NEGOTIATING)) { @@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl u_size = mdev->ldev->dc.disk_size; q_order_type = drbd_queue_order_type(mdev); max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; - max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE); + max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE); put_ldev(mdev); } else { d_size = 0; @@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */ if (mdev->agreed_pro_version <= 94) - max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET); p.d_size = cpu_to_be64(d_size); p.u_size = cpu_to_be64(u_size); @@ -3521,9 +3537,9 @@ static void drbd_cleanup(void) } /** - * drbd_congested() - Callback for pdflush + * drbd_congested() - Callback for the flusher thread * @congested_data: User data - * @bdi_bits: Bits pdflush is currently interested in + * @bdi_bits: Bits the BDI flusher thread is currently interested in * * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. 
*/ @@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits) goto out; } + if (test_bit(CALLBACK_PENDING, &mdev->flags)) { + r |= (1 << BDI_async_congested); + /* Without good local data, we would need to read from remote, + * and that would need the worker thread as well, which is + * currently blocked waiting for that usermode helper to + * finish. + */ + if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) + r |= (1 << BDI_sync_congested); + else + put_ldev(mdev); + r &= bdi_bits; + reason = 'c'; + goto out; + } + if (get_ldev(mdev)) { q = bdev_get_queue(mdev->ldev->backing_bdev); r = bdi_congested(&q->backing_dev_info, bdi_bits); @@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) q->backing_dev_info.congested_data = mdev; blk_queue_make_request(q, drbd_make_request); + blk_queue_flush(q, REQ_FLUSH | REQ_FUA); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); @@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev) if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { /* this was a try anyways ... */ dev_err(DEV, "meta data update failed!\n"); - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); } /* Update mdev->ldev->md.la_size_sect, @@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) spin_lock_irq(&mdev->req_lock); if (mdev->state.conn < C_CONNECTED) { - int peer; + unsigned int peer; peer = be32_to_cpu(buffer->la_peer_max_bio_size); - peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); + peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE); mdev->peer_max_bio_size = peer; } spin_unlock_irq(&mdev->req_lock); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 6d4de6a72e80..fb9dce8daa24 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) char *argv[] = {usermode_helper, cmd, mb, NULL }; int ret; + if (current == mdev->worker.task) + set_bit(CALLBACK_PENDING, &mdev->flags); + snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); if (get_net_conf(mdev)) { @@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); + if (current == mdev->worker.task) + clear_bit(CALLBACK_PENDING, &mdev->flags); + if (ret < 0) /* Ignore any ERRNOs we got. 
*/ ret = 0; @@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev) static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) { struct request_queue * const q = mdev->rq_queue; - int max_hw_sectors = max_bio_size >> 9; - int max_segments = 0; + unsigned int max_hw_sectors = max_bio_size >> 9; + unsigned int max_segments = 0; if (get_ldev_if_state(mdev, D_ATTACHING)) { struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; @@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) { - int now, new, local, peer; + unsigned int now, new, local, peer; now = queue_max_hw_sectors(mdev->rq_queue) << 9; local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */ @@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) mdev->local_max_bio_size = local; put_ldev(mdev); } + local = min(local, DRBD_MAX_BIO_SIZE); /* We may ignore peer limits if the peer is modern enough. Because new from 8.3.8 onwards the peer can use multiple BIOs for a single peer_request */ if (mdev->state.conn >= C_CONNECTED) { if (mdev->agreed_pro_version < 94) { - peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ } else if (mdev->agreed_pro_version == 94) peer = DRBD_MAX_SIZE_H80_PACKET; @@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) peer = DRBD_MAX_BIO_SIZE; } - new = min_t(int, local, peer); + new = min(local, peer); if (mdev->state.role == R_PRIMARY && new < now) - dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now); + dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now); if (new != now) dev_info(DEV, "max BIO size = %u\n", new); @@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp * to realize a "hot spare" feature (not that I'd recommend that) */ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + /* make sure there is no leftover from previous force-detach attempts */ + clear_bit(FORCE_DETACH, &mdev->flags); + + /* and no leftover from previously aborted resync or verify, either */ + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); + /* allocation not in the IO path, cqueue thread context */ nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); if (!nbc) { @@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, } if (dt.detach_force) { + set_bit(FORCE_DETACH, &mdev->flags); drbd_force_state(mdev, NS(disk, D_FAILED)); reply->ret_code = SS_SUCCESS; goto out; @@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl int retcode; /* If there is still bitmap IO pending, probably because of a previous - * resync just being finished, wait for it before requesting a new resync. */ + * resync just being finished, wait for it before requesting a new resync. + * Also wait for it's after_state_ch(). 
*/ drbd_suspend_io(mdev); wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + drbd_flush_workqueue(mdev); retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); @@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re int retcode; /* If there is still bitmap IO pending, probably because of a previous - * resync just being finished, wait for it before requesting a new resync. */ + * resync just being finished, wait for it before requesting a new resync. + * Also wait for it's after_state_ch(). */ drbd_suspend_io(mdev); wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + drbd_flush_workqueue(mdev); retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 869bada2ed06..5496104f90b9 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v) mdev->state.role == R_SECONDARY) { seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { + /* reset mdev->congestion_reason */ + bdi_rw_congested(&mdev->rq_queue->backing_dev_info); + seq_printf(seq, "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index ea4836e0ae98..c74ca2df7431 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; int i; + if (page == NULL) + return; + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) i = page_chain_free(page); else { @@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, gfp_t gfp_mask) __must_hold(local) { struct drbd_epoch_entry *e; - struct page *page; + struct page *page = NULL; unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) @@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, return NULL; } - page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); - if (!page) - goto fail; + if (data_size) { + page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); + if (!page) + goto fail; + } INIT_HLIST_NODE(&e->collision); e->epoch = NULL; @@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ data_size -= dgs; - ERR_IF(data_size == 0) return NULL; ERR_IF(data_size & 0x1ff) return NULL; ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; @@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ if (!e) return NULL; + if (!data_size) + return e; + ds = data_size; page = e->pages; page_chain_for_each(page) { @@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(mdev, dp_flags); + if (e->pages == NULL) { + D_ASSERT(e->size == 0); + D_ASSERT(dp_flags & DP_FLUSH); + } if (dp_flags & DP_MAY_SET_IN_SYNC) e->flags |= EE_MAY_SET_IN_SYNC; @@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev) mdev->ee_hash = NULL; mdev->ee_hash_s = 0; - /* paranoia code */ - for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) - if (h->first) - dev_err(DEV, "ASSERT 
FAILED tl_hash[%u] == %p, expected NULL\n", - (int)(h - mdev->tl_hash), h->first); + /* We may not have had the chance to wait for all locally pending + * application requests. The hlist_add_fake() prevents access after + * free on master bio completion. */ + for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) { + struct drbd_request *req; + struct hlist_node *pos, *n; + hlist_for_each_entry_safe(req, pos, n, h, collision) { + hlist_del_init(&req->collision); + hlist_add_fake(&req->collision); + } + } + kfree(mdev->tl_hash); mdev->tl_hash = NULL; mdev->tl_hash_s = 0; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 8e93a6ac9bb6..910335c30927 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= RQ_LOCAL_COMPLETED; req->rq_state &= ~RQ_LOCAL_PENDING; - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); _req_may_be_done_not_susp(req, m); break; @@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; } - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); goto_queue_for_net_read: @@ -1111,13 +1111,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) /* * what we "blindly" assume: */ - D_ASSERT(bio->bi_size > 0); D_ASSERT((bio->bi_size & 0x1ff) == 0); /* to make some things easier, force alignment of requests within the * granularity of our hash tables */ s_enr = bio->bi_sector >> HT_SHIFT; - e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT; + e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr; if (likely(s_enr == e_enr)) { do { @@ -1275,7 +1274,7 @@ void request_timer_fn(unsigned long data) time_after(now, req->start_time + dt) && !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) { dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); - __drbd_chk_io_error(mdev, 1); + __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH); } nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; spin_unlock_irq(&mdev->req_lock); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 620c70ff2231..6bce2cc179d4 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) if (list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); spin_unlock_irqrestore(&mdev->req_lock, flags); drbd_queue_work(&mdev->data.work, &e->w); @@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo : list_empty(&mdev->active_ee); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); spin_unlock_irqrestore(&mdev->req_lock, flags); if (is_syncer_req) @@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) return; } - if (mdev->state.conn < C_AHEAD) { - /* In case a previous resync run was aborted by an IO error/detach on the peer. */ - drbd_rs_cancel_all(mdev); - /* This should be done when we abort the resync. 
We definitely do not - want to have this for connections going back and forth between - Ahead/Behind and SyncSource/SyncTarget */ - } - if (side == C_SYNC_TARGET) { /* Since application IO was locked out during C_WF_BITMAP_T and C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 553f43a90953..a7d6347aaa79 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -191,6 +191,7 @@ static int print_unex = 1; #include <linux/mutex.h> #include <linux/io.h> #include <linux/uaccess.h> +#include <linux/async.h> /* * PS/2 floppies have much slower step rates than regular floppies. @@ -2516,8 +2517,7 @@ static int make_raw_rw_request(void) set_fdc((long)current_req->rq_disk->private_data); raw_cmd = &default_raw_cmd; - raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | - FD_RAW_NEED_SEEK; + raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; raw_cmd->cmd_count = NR_RW; if (rq_data_dir(current_req) == READ) { raw_cmd->flags |= FD_RAW_READ; @@ -4123,7 +4123,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) return get_disk(disks[drive]); } -static int __init floppy_init(void) +static int __init do_floppy_init(void) { int i, unit, drive; int err, dr; @@ -4338,6 +4338,24 @@ out_put_disk: return err; } +#ifndef MODULE +static __init void floppy_async_init(void *data, async_cookie_t cookie) +{ + do_floppy_init(); +} +#endif + +static int __init floppy_init(void) +{ +#ifdef MODULE + return do_floppy_init(); +#else + /* Don't hold up the bootup by the floppy initialization */ + async_schedule(floppy_async_init, NULL); + return 0; +#endif +} + static const struct io_region { int offset; int size; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 76bc96fd01c8..d07c9f7fded6 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -485,7 +485,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) nbd_end_request(req); } else { spin_lock(&nbd->queue_lock); - list_add(&req->queuelist, &nbd->queue_head); + list_add_tail(&req->queuelist, &nbd->queue_head); spin_unlock(&nbd->queue_lock); } diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 9a72277a31df..eb0d8216f557 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -513,42 +513,19 @@ static void process_page(unsigned long data) } } -struct mm_plug_cb { - struct blk_plug_cb cb; - struct cardinfo *card; -}; - -static void mm_unplug(struct blk_plug_cb *cb) +static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb); + struct cardinfo *card = cb->data; - spin_lock_irq(&mmcb->card->lock); - activate(mmcb->card); - spin_unlock_irq(&mmcb->card->lock); - kfree(mmcb); + spin_lock_irq(&card->lock); + activate(card); + spin_unlock_irq(&card->lock); + kfree(cb); } static int mm_check_plugged(struct cardinfo *card) { - struct blk_plug *plug = current->plug; - struct mm_plug_cb *mmcb; - - if (!plug) - return 0; - - list_for_each_entry(mmcb, &plug->cb_list, cb.list) { - if (mmcb->cb.callback == mm_unplug && mmcb->card == card) - return 1; - } - /* Not currently on the callback list */ - mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC); - if (!mmcb) - return 0; - - mmcb->card = card; - mmcb->cb.callback = mm_unplug; - list_add(&mmcb->cb.list, &plug->cb_list); - return 1; + return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); } static void mm_make_request(struct request_queue *q, 
struct bio *bio) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 10308cd8a7ed..11f36e502136 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -79,6 +79,7 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0489, 0xe057) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, @@ -104,6 +105,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e27221411036..cef3bac1a543 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -98,6 +98,7 @@ static struct usb_device_id btusb_table[] = { { USB_DEVICE(0x0a5c, 0x21e6) }, { USB_DEVICE(0x0a5c, 0x21e8) }, { USB_DEVICE(0x0a5c, 0x21f3) }, + { USB_DEVICE(0x0a5c, 0x21f4) }, { USB_DEVICE(0x413c, 0x8197) }, /* Foxconn - Hon Hai */ @@ -133,6 +134,7 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 57226424690c..6f007b6c240d 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -239,16 +239,45 @@ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 -#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 -#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 -#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a -#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ -#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A +#define 
PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A #endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9ed92ef5829b..08fc5cbb13cd 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1502,15 +1502,73 @@ static const struct intel_gtt_driver_description { "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, "Haswell", &sandybridge_gtt_driver }, - { PCI_DEVICE_ID_INTEL_HASWELL_SDV, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver 
}, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, "Haswell", &sandybridge_gtt_driver }, { 0, NULL, NULL } }; diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index d706bd0e9e80..4fbdceb6f773 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -160,7 +160,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int omap_rng_suspend(struct device *dev) { diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 89682fa8801e..c4be3519a587 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); #endif +#ifdef CONFIG_PM_SLEEP static int tpm_tis_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); @@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev) return tpm_pm_resume(dev); } +#endif static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index 540795cd0760..d9279385304d 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c @@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock; #define MFGPT_PERIODIC (MFGPT_HZ / HZ) /* - * The MFPGT timers on the CS5536 provide us with suitable timers to use + * The MFGPT timers on the CS5536 provide us with suitable timers to use * as clock event sources - not as good as a HPET or APIC, but certainly * better than the PIT. This isn't a general purpose MFGPT driver, but * a simplified one designed specifically to act as a clock event source. 
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void) timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); if (!timer) { - printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); + printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n"); return -ENODEV; } cs5535_event_clock = timer; diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index cdc02ac8f41a..503996a94a6a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); + ret = -ENOMEM; goto out_free; } pcch_hdr = pcch_virt_addr; diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 2c9bf2692232..3265844839bf 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -678,10 +678,22 @@ static int cpuidle_coupled_cpu_notify(struct notifier_block *nb, int cpu = (unsigned long)hcpu; struct cpuidle_device *dev; + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + case CPU_DOWN_PREPARE: + case CPU_ONLINE: + case CPU_DEAD: + case CPU_UP_CANCELED: + case CPU_DOWN_FAILED: + break; + default: + return NOTIFY_OK; + } + mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); - if (!dev->coupled) + if (!dev || !dev->coupled) goto out; switch (action & ~CPU_TASKS_FROZEN) { diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d45cf1bcbde5..d06ea2950dd9 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -53,6 +53,7 @@ config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA && EXPERIMENTAL select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Platform has a PL08x DMAC device which can provide DMA engine support @@ -269,6 +270,7 @@ config DMA_SA11X0 tristate "SA-11x0 DMA support" depends on ARCH_SA1100 select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Support the DMA engine found on Intel StrongARM SA-1100 and SA-1110 SoCs. This DMA engine can only be used with on-chip @@ -284,9 +286,18 @@ config MMP_TDMA Say Y here if you enabled MMP ADMA, otherwise say N. 
+config DMA_OMAP + tristate "OMAP DMA support" + depends on ARCH_OMAP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config DMA_ENGINE bool +config DMA_VIRTUAL_CHANNELS + tristate + comment "DMA Clients" depends on DMA_ENGINE diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 640356add0a3..4cf6b128ab9a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o +obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o @@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o +obj-$(CONFIG_DMA_OMAP) += omap-dma.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 49ecbbb8932d..6fbeebb9486f 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -86,10 +86,12 @@ #include <asm/hardware/pl080.h> #include "dmaengine.h" +#include "virt-dma.h" #define DRIVER_NAME "pl08xdmac" static struct amba_driver pl08x_amba_driver; +struct pl08x_driver_data; /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives @@ -119,6 +121,123 @@ struct pl08x_lli { }; /** + * struct pl08x_bus_data - information of source or destination + * busses for a transfer + * @addr: current address + * @maxwidth: the maximum width of a transfer on this bus + * @buswidth: the width of this bus in bytes: 1, 2 or 4 + */ +struct pl08x_bus_data { + dma_addr_t addr; + u8 maxwidth; + u8 buswidth; +}; + +/** + * struct pl08x_phy_chan - holder for the physical channels + * @id: physical index to this channel + * @lock: a lock to use when altering an instance of this struct + * @serving: the virtual channel currently being served by this physical + * channel + * @locked: channel unavailable for the system, e.g. dedicated to secure + * world + */ +struct pl08x_phy_chan { + unsigned int id; + void __iomem *base; + spinlock_t lock; + struct pl08x_dma_chan *serving; + bool locked; +}; + +/** + * struct pl08x_sg - structure containing data per sg + * @src_addr: src address of sg + * @dst_addr: dst address of sg + * @len: transfer len in bytes + * @node: node for txd's dsg_list + */ +struct pl08x_sg { + dma_addr_t src_addr; + dma_addr_t dst_addr; + size_t len; + struct list_head node; +}; + +/** + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor + * @vd: virtual DMA descriptor + * @dsg_list: list of children sg's + * @llis_bus: DMA memory address (physical) start for the LLIs + * @llis_va: virtual memory address start for the LLIs + * @cctl: control reg values for current txd + * @ccfg: config reg values for current txd + * @done: this marks completed descriptors, which should not have their + * mux released. + */ +struct pl08x_txd { + struct virt_dma_desc vd; + struct list_head dsg_list; + dma_addr_t llis_bus; + struct pl08x_lli *llis_va; + /* Default cctl value for LLIs */ + u32 cctl; + /* + * Settings to be put into the physical channel when we + * trigger this txd. Other registers are in llis_va[0]. 
+ */ + u32 ccfg; + bool done; +}; + +/** + * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel + * states + * @PL08X_CHAN_IDLE: the channel is idle + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport + * channel and is running a transfer on it + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport + * channel, but the transfer is currently paused + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport + * channel to become available (only pertains to memcpy channels) + */ +enum pl08x_dma_chan_state { + PL08X_CHAN_IDLE, + PL08X_CHAN_RUNNING, + PL08X_CHAN_PAUSED, + PL08X_CHAN_WAITING, +}; + +/** + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel + * @vc: wrapped virtual channel + * @phychan: the physical channel utilized by this channel, if there is one + * @name: name of channel + * @cd: channel platform data + * @cfg: slave DMA configuration, set at runtime through the + * dmaengine slave config interface + * @at: active transaction on this channel + * @host: a pointer to the host (internal use) + * @state: whether the channel is idle, paused, running etc + * @slave: whether this channel is a device (slave) or for memcpy + * @signal: the physical DMA request signal which this channel is using + * @mux_use: count of descriptors using this DMA request signal setting + */ +struct pl08x_dma_chan { + struct virt_dma_chan vc; + struct pl08x_phy_chan *phychan; + const char *name; + const struct pl08x_channel_data *cd; + struct dma_slave_config cfg; + struct pl08x_txd *at; + struct pl08x_driver_data *host; + enum pl08x_dma_chan_state state; + bool slave; + int signal; + unsigned mux_use; +}; + +/** * struct pl08x_driver_data - the local state holder for the PL08x * @slave: slave engine for this instance * @memcpy: memcpy engine for this instance @@ -128,7 +247,6 @@ struct pl08x_lli { * @pd: platform data passed in from the platform/machine * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors - * @pool_ctr: counter of LLIs in the pool * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI * fetches * @mem_buses: set to indicate memory transfers on AHB2. @@ -143,10 +261,8 @@ struct pl08x_driver_data { struct pl08x_platform_data *pd; struct pl08x_phy_chan *phy_chans; struct dma_pool *pool; - int pool_ctr; u8 lli_buses; u8 mem_buses; - spinlock_t lock; }; /* @@ -162,12 +278,51 @@ struct pl08x_driver_data { static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) { - return container_of(chan, struct pl08x_dma_chan, chan); + return container_of(chan, struct pl08x_dma_chan, vc.chan); } static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) { - return container_of(tx, struct pl08x_txd, tx); + return container_of(tx, struct pl08x_txd, vd.tx); +} + +/* + * Mux handling. + * + * This gives us the DMA request input to the PL08x primecell which the + * peripheral described by the channel data will be routed to, possibly + * via a board/SoC specific external MUX. One important point to note + * here is that this does not depend on the physical channel. 
+ */ +static int pl08x_request_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + int ret; + + if (plchan->mux_use++ == 0 && pd->get_signal) { + ret = pd->get_signal(plchan->cd); + if (ret < 0) { + plchan->mux_use = 0; + return ret; + } + + plchan->signal = ret; + } + return 0; +} + +static void pl08x_release_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + + if (plchan->signal >= 0) { + WARN_ON(plchan->mux_use == 0); + + if (--plchan->mux_use == 0 && pd->put_signal) { + pd->put_signal(plchan->cd, plchan->signal); + plchan->signal = -1; + } + } } /* @@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) * been set when the LLIs were constructed. Poke them into the hardware * and start the transfer. */ -static void pl08x_start_txd(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *phychan = plchan->phychan; - struct pl08x_lli *lli = &txd->llis_va[0]; + struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_lli *lli; u32 val; + list_del(&txd->vd.node); + plchan->at = txd; /* Wait for channel inactive */ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); + lli = &txd->llis_va[0]; + dev_vdbg(&pl08x->adev->dev, "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", @@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; struct pl08x_txd *txd; - unsigned long flags; size_t bytes = 0; - spin_lock_irqsave(&plchan->lock, flags); ch = plchan->phychan; txd = plchan->at; @@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) } } - /* Sum up all queued transactions */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *txdi; - list_for_each_entry(txdi, &plchan->pend_list, node) { - struct pl08x_sg *dsg; - list_for_each_entry(dsg, &txd->dsg_list, node) - bytes += dsg->len; - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - return bytes; } @@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, if (!ch->locked && !ch->serving) { ch->serving = virt_chan; - ch->signal = -1; spin_unlock_irqrestore(&ch->lock, flags); break; } @@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, return NULL; } - pm_runtime_get_sync(&pl08x->adev->dev); return ch; } +/* Mark the physical channel as free. Note, this write is atomic. */ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - unsigned long flags; + ch->serving = NULL; +} + +/* + * Try to allocate a physical channel. When successful, assign it to + * this virtual channel, and initiate the next descriptor. The + * virtual channel lock must be held at this point. 
+ */ +static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_phy_chan *ch; - spin_lock_irqsave(&ch->lock, flags); + ch = pl08x_get_phy_channel(pl08x, plchan); + if (!ch) { + dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); + plchan->state = PL08X_CHAN_WAITING; + return; + } - /* Stop the channel and clear its interrupts */ - pl08x_terminate_phy_chan(pl08x, ch); + dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", + ch->id, plchan->name); - pm_runtime_put(&pl08x->adev->dev); + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} - /* Mark it as free */ - ch->serving = NULL; - spin_unlock_irqrestore(&ch->lock, flags); +static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, + struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + + dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", + ch->id, plchan->name); + + /* + * We do this without taking the lock; we're really only concerned + * about whether this pointer is NULL or not, and we're guaranteed + * that this will only be called when it _already_ is non-NULL. + */ + ch->serving = plchan; + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} + +/* + * Free a physical DMA channel, potentially reallocating it to another + * virtual channel if we have any pending. + */ +static void pl08x_phy_free(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_dma_chan *p, *next; + + retry: + next = NULL; + + /* Find a waiting virtual channel for the next transfer. */ + list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + + if (!next) { + list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + } + + /* Ensure that the physical channel is stopped */ + pl08x_terminate_phy_chan(pl08x, plchan->phychan); + + if (next) { + bool success; + + /* + * Eww. We know this isn't going to deadlock + * but lockdep probably doesn't. + */ + spin_lock(&next->vc.lock); + /* Re-check the state now that we have the lock */ + success = next->state == PL08X_CHAN_WAITING; + if (success) + pl08x_phy_reassign_start(plchan->phychan, next); + spin_unlock(&next->vc.lock); + + /* If the state changed, try to find another channel */ + if (!success) + goto retry; + } else { + /* No more jobs, so free up the physical channel */ + pl08x_put_phy_channel(pl08x, plchan->phychan); + } + + plchan->phychan = NULL; + plchan->state = PL08X_CHAN_IDLE; } /* @@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return 0; } - pl08x->pool_ctr++; - bd.txd = txd; bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? 
PL080_LLI_LM_AHB2 : 0; cctl = txd->cctl; @@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return num_llis; } -/* You should call this with the struct pl08x lock held */ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_sg *dsg, *_dsg; - /* Free the LLI */ if (txd->llis_va) dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); - pl08x->pool_ctr--; - list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { list_del(&dsg->node); kfree(dsg); @@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, - struct pl08x_dma_chan *plchan) +static void pl08x_unmap_buffers(struct pl08x_txd *txd) { - struct pl08x_txd *txdi = NULL; - struct pl08x_txd *next; - - if (!list_empty(&plchan->pend_list)) { - list_for_each_entry_safe(txdi, - next, &plchan->pend_list, node) { - list_del(&txdi->node); - pl08x_free_txd(pl08x, txdi); + struct device *dev = txd->vd.tx.chan->device->dev; + struct pl08x_sg *dsg; + + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); + else { + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); } } + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + else + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + } } -/* - * The DMA ENGINE API - */ -static int pl08x_alloc_chan_resources(struct dma_chan *chan) +static void pl08x_desc_free(struct virt_dma_desc *vd) { - return 0; -} + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); -static void pl08x_free_chan_resources(struct dma_chan *chan) -{ + if (!plchan->slave) + pl08x_unmap_buffers(txd); + + if (!txd->done) + pl08x_release_mux(plchan); + + pl08x_free_txd(plchan->host, txd); } -/* - * This should be called with the channel plchan->lock held - */ -static int prep_phy_channel(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, + struct pl08x_dma_chan *plchan) { - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_phy_chan *ch; - int ret; - - /* Check if we already have a channel */ - if (plchan->phychan) { - ch = plchan->phychan; - goto got_channel; - } + LIST_HEAD(head); + struct pl08x_txd *txd; - ch = pl08x_get_phy_channel(pl08x, plchan); - if (!ch) { - /* No physical channel available, cope with it */ - dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); - return -EBUSY; - } + vchan_get_all_descriptors(&plchan->vc, &head); - /* - * OK we have a physical channel: for memcpy() this is all we - * need, but for slaves the physical signals may be muxed! - * Can the platform allow us to use this channel? 
- */ - if (plchan->slave && pl08x->pd->get_signal) { - ret = pl08x->pd->get_signal(plchan); - if (ret < 0) { - dev_dbg(&pl08x->adev->dev, - "unable to use physical channel %d for transfer on %s due to platform restrictions\n", - ch->id, plchan->name); - /* Release physical channel & return */ - pl08x_put_phy_channel(pl08x, ch); - return -EBUSY; - } - ch->signal = ret; + while (!list_empty(&head)) { + txd = list_first_entry(&head, struct pl08x_txd, vd.node); + list_del(&txd->vd.node); + pl08x_desc_free(&txd->vd); } - - plchan->phychan = ch; - dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", - ch->id, - ch->signal, - plchan->name); - -got_channel: - /* Assign the flow control signal to this channel */ - if (txd->direction == DMA_MEM_TO_DEV) - txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; - else if (txd->direction == DMA_DEV_TO_MEM) - txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; - - plchan->phychan_hold++; - - return 0; } -static void release_phy_channel(struct pl08x_dma_chan *plchan) +/* + * The DMA ENGINE API + */ +static int pl08x_alloc_chan_resources(struct dma_chan *chan) { - struct pl08x_driver_data *pl08x = plchan->host; - - if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { - pl08x->pd->put_signal(plchan); - plchan->phychan->signal = -1; - } - pl08x_put_phy_channel(pl08x, plchan->phychan); - plchan->phychan = NULL; + return 0; } -static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) +static void pl08x_free_chan_resources(struct dma_chan *chan) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); - struct pl08x_txd *txd = to_pl08x_txd(tx); - unsigned long flags; - dma_cookie_t cookie; - - spin_lock_irqsave(&plchan->lock, flags); - cookie = dma_cookie_assign(tx); - - /* Put this onto the pending list */ - list_add_tail(&txd->node, &plchan->pend_list); - - /* - * If there was no physical channel available for this memcpy, - * stack the request up and indicate that the channel is waiting - * for a free physical channel. - */ - if (!plchan->slave && !plchan->phychan) { - /* Do this memcpy whenever there is a channel ready */ - plchan->state = PL08X_CHAN_WAITING; - plchan->waiting = txd; - } else { - plchan->phychan_hold--; - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - return cookie; + /* Ensure all queued descriptors are freed */ + vchan_free_chan_resources(to_virt_chan(chan)); } static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( @@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; enum dma_status ret; + size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) return ret; /* + * There's no point calculating the residue if there's + * no txstate to store the value. 
+ */ + if (!txstate) { + if (plchan->state == PL08X_CHAN_PAUSED) + ret = DMA_PAUSED; + return ret; + } + + spin_lock_irqsave(&plchan->vc.lock, flags); + ret = dma_cookie_status(chan, cookie, txstate); + if (ret != DMA_SUCCESS) { + vd = vchan_find_desc(&plchan->vc, cookie); + if (vd) { + /* On the issued list, so hasn't been processed yet */ + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_sg *dsg; + + list_for_each_entry(dsg, &txd->dsg_list, node) + bytes += dsg->len; + } else { + bytes = pl08x_getbytes_chan(plchan); + } + } + spin_unlock_irqrestore(&plchan->vc.lock, flags); + + /* * This cookie not complete yet * Get number of bytes left in the active transactions and queue */ - dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); + dma_set_residue(txstate, bytes); - if (plchan->state == PL08X_CHAN_PAUSED) - return DMA_PAUSED; + if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) + ret = DMA_PAUSED; /* Whether waiting or running, we're in progress */ - return DMA_IN_PROGRESS; + return ret; } /* PrimeCell DMA extension */ @@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst) return burst_sizes[i].reg; } -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) +static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, + enum dma_slave_buswidth addr_width, u32 maxburst) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - struct pl08x_driver_data *pl08x = plchan->host; - enum dma_slave_buswidth addr_width; - u32 width, burst, maxburst; - u32 cctl = 0; - - if (!plchan->slave) - return -EINVAL; - - /* Transfer direction */ - plchan->runtime_direction = config->direction; - if (config->direction == DMA_MEM_TO_DEV) { - addr_width = config->dst_addr_width; - maxburst = config->dst_maxburst; - } else if (config->direction == DMA_DEV_TO_MEM) { - addr_width = config->src_addr_width; - maxburst = config->src_maxburst; - } else { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien transfer direction\n"); - return -EINVAL; - } + u32 width, burst, cctl = 0; width = pl08x_width(addr_width); - if (width == ~0) { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien address width\n"); - return -EINVAL; - } + if (width == ~0) + return ~0; cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; @@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan, cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; - plchan->device_fc = config->device_fc; + return pl08x_cctl(cctl); +} - if (plchan->runtime_direction == DMA_DEV_TO_MEM) { - plchan->src_addr = config->src_addr; - plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | - pl08x_select_bus(plchan->cd->periph_buses, - pl08x->mem_buses); - } else { - plchan->dst_addr = config->dst_addr; - plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(pl08x->mem_buses, - plchan->cd->periph_buses); - } +static int dma_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - dev_dbg(&pl08x->adev->dev, - "configured channel %s (%s) for %s, data width %d, " - "maxburst %d words, LE, CCTL=0x%08x\n", - dma_chan_name(chan), plchan->name, - (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", - addr_width, - maxburst, - cctl); + if (!plchan->slave) + return -EINVAL; + + /* Reject definitely invalid configurations */ + if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + plchan->cfg = *config; return 0; } @@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan) struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); unsigned long flags; - spin_lock_irqsave(&plchan->lock, flags); - /* Something is already active, or we're waiting for a channel... */ - if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { - spin_unlock_irqrestore(&plchan->lock, flags); - return; - } - - /* Take the first element in the queue and execute it */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - plchan->state = PL08X_CHAN_RUNNING; - - pl08x_start_txd(plchan, next); + spin_lock_irqsave(&plchan->vc.lock, flags); + if (vchan_issue_pending(&plchan->vc)) { + if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) + pl08x_phy_alloc_and_start(plchan); } - - spin_unlock_irqrestore(&plchan->lock, flags); -} - -static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) -{ - struct pl08x_driver_data *pl08x = plchan->host; - unsigned long flags; - int num_llis, ret; - - num_llis = pl08x_fill_llis_for_desc(pl08x, txd); - if (!num_llis) { - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EINVAL; - } - - spin_lock_irqsave(&plchan->lock, flags); - - /* - * See if we already have a physical channel allocated, - * else this is the time to try to get one. - */ - ret = prep_phy_channel(plchan, txd); - if (ret) { - /* - * No physical channel was available. - * - * memcpy transfers can be sorted out at submission time. - * - * Slave transfers may have been denied due to platform - * channel muxing restrictions. Since there is no guarantee - * that this will ever be resolved, and the signal must be - * acquired AFTER acquiring the physical channel, we will let - * them be NACK:ed with -EBUSY here. The drivers can retry - * the prep() call if they are eager on doing this using DMA. - */ - if (plchan->slave) { - pl08x_free_txd_list(pl08x, plchan); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EBUSY; - } - } else - /* - * Else we're all set, paused and ready to roll, status - * will switch to PL08X_CHAN_RUNNING when we call - * issue_pending(). If there is something running on the - * channel already we don't change its state. 
- */ - if (plchan->state == PL08X_CHAN_IDLE) - plchan->state = PL08X_CHAN_PAUSED; - - spin_unlock_irqrestore(&plchan->lock, flags); - - return 0; + spin_unlock_irqrestore(&plchan->vc.lock, flags); } -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, - unsigned long flags) +static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { - dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = pl08x_tx_submit; - INIT_LIST_HEAD(&txd->node); INIT_LIST_HEAD(&txd->dsg_list); /* Always enable error and terminal interrupts */ @@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_sg *dsg; int ret; - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); @@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( } list_add_tail(&dsg->node, &txd->dsg_list); - txd->direction = DMA_NONE; dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* Set platform data for m2m */ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl & + txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); /* Both to be incremented or the code will break */ @@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses); - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( @@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_txd *txd; struct pl08x_sg *dsg; struct scatterlist *sg; + enum dma_slave_buswidth addr_width; dma_addr_t slave_addr; int ret, tmp; + u8 src_buses, dst_buses; + u32 maxburst, cctl; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } - if (direction != plchan->runtime_direction) - dev_err(&pl08x->adev->dev, "%s DMA setup does not match " - "the direction configured for the PrimeCell\n", - __func__); - /* * Set up addresses, the PrimeCell configured address * will take precedence since this may configure the * channel target address dynamically at runtime. 
*/ - txd->direction = direction; - if (direction == DMA_MEM_TO_DEV) { - txd->cctl = plchan->dst_cctl; - slave_addr = plchan->dst_addr; + cctl = PL080_CONTROL_SRC_INCR; + slave_addr = plchan->cfg.dst_addr; + addr_width = plchan->cfg.dst_addr_width; + maxburst = plchan->cfg.dst_maxburst; + src_buses = pl08x->mem_buses; + dst_buses = plchan->cd->periph_buses; } else if (direction == DMA_DEV_TO_MEM) { - txd->cctl = plchan->src_cctl; - slave_addr = plchan->src_addr; + cctl = PL080_CONTROL_DST_INCR; + slave_addr = plchan->cfg.src_addr; + addr_width = plchan->cfg.src_addr_width; + maxburst = plchan->cfg.src_maxburst; + src_buses = plchan->cd->periph_buses; + dst_buses = pl08x->mem_buses; } else { pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, @@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } - if (plchan->device_fc) + cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); + if (cctl == ~0) { + pl08x_free_txd(pl08x, txd); + dev_err(&pl08x->adev->dev, + "DMA slave configuration botched?\n"); + return NULL; + } + + txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); + + if (plchan->cfg.device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else @@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + ret = pl08x_request_mux(plchan); + if (ret < 0) { + pl08x_free_txd(pl08x, txd); + dev_dbg(&pl08x->adev->dev, + "unable to mux for transfer on %s due to platform restrictions\n", + plchan->name); + return NULL; + } + + dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", + plchan->signal, plchan->name); + + /* Assign the flow control signal to this channel */ + if (direction == DMA_MEM_TO_DEV) + txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; + else + txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; + for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { + pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); @@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } } - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Anything succeeds on channels with no physical allocation and * no queued transfers. 
*/ - spin_lock_irqsave(&plchan->lock, flags); + spin_lock_irqsave(&plchan->vc.lock, flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return 0; } @@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { - pl08x_terminate_phy_chan(pl08x, plchan->phychan); - /* * Mark physical channel as free and free any slave * signal */ - release_phy_channel(plchan); - plchan->phychan_hold = 0; + pl08x_phy_free(plchan); } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_free_txd(pl08x, plchan->at); + pl08x_desc_free(&plchan->at->vd); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return ret; } @@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - -static void pl08x_tasklet(unsigned long data) -{ - struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_txd *txd; - unsigned long flags; - - spin_lock_irqsave(&plchan->lock, flags); - - txd = plchan->at; - plchan->at = NULL; - - if (txd) { - /* Update last completed */ - dma_cookie_complete(&txd->tx); - } - - /* If a new descriptor is queued, set it up plchan->at is NULL here */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - - pl08x_start_txd(plchan, next); - } else if (plchan->phychan_hold) { - /* - * This channel is still in use - we have a new txd being - * prepared and will soon be queued. Don't give up the - * physical channel. - */ - } else { - struct pl08x_dma_chan *waiting = NULL; - - /* - * No more jobs, so free up the physical channel - * Free any allocated signal on slave transfers too - */ - release_phy_channel(plchan); - plchan->state = PL08X_CHAN_IDLE; - - /* - * And NOW before anyone else can grab that free:d up - * physical channel, see if there is some memcpy pending - * that seriously needs to start because of being stacked - * up while we were choking the physical channels with data. 
- */ - list_for_each_entry(waiting, &pl08x->memcpy.channels, - chan.device_node) { - if (waiting->state == PL08X_CHAN_WAITING && - waiting->waiting != NULL) { - int ret; - - /* This should REALLY not fail now */ - ret = prep_phy_channel(waiting, - waiting->waiting); - BUG_ON(ret); - waiting->phychan_hold--; - waiting->state = PL08X_CHAN_RUNNING; - waiting->waiting = NULL; - pl08x_issue_pending(&waiting->chan); - break; - } - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - if (txd) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - /* Don't try to unmap buffers on slave channels */ - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - /* Free the descriptor */ - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - - /* Callback to signal completion */ - if (callback) - callback(callback_param); - } -} - static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; @@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; + struct pl08x_txd *tx; if (!plchan) { dev_err(&pl08x->adev->dev, @@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev) continue; } - /* Schedule tasklet on this channel */ - tasklet_schedule(&plchan->tasklet); + spin_lock(&plchan->vc.lock); + tx = plchan->at; + if (tx) { + plchan->at = NULL; + /* + * This descriptor is done, release its mux + * reservation. + */ + pl08x_release_mux(plchan); + tx->done = true; + vchan_cookie_complete(&tx->vd); + + /* + * And start the next descriptor (if any), + * otherwise free this channel. 
+ */ + if (vchan_next_desc(&plchan->vc)) + pl08x_start_next_txd(plchan); + else + pl08x_phy_free(plchan); + } + spin_unlock(&plchan->vc.lock); + mask |= (1 << i); } } @@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev) static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { - u32 cctl = pl08x_cctl(chan->cd->cctl); - chan->slave = true; chan->name = chan->cd->bus_id; - chan->src_addr = chan->cd->addr; - chan->dst_addr = chan->cd->addr; - chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | - pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); - chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); + chan->cfg.src_addr = chan->cd->addr; + chan->cfg.dst_addr = chan->cd->addr; } /* @@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; + chan->signal = -1; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; @@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } - if (chan->cd->circular_buffer) { - dev_err(&pl08x->adev->dev, - "channel %s: circular buffers not supported\n", - chan->name); - kfree(chan); - continue; - } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); - chan->chan.device = dmadev; - dma_cookie_init(&chan->chan); - - spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pend_list); - tasklet_init(&chan->tasklet, pl08x_tasklet, - (unsigned long) chan); - - list_add_tail(&chan->chan.device_node, &dmadev->channels); + chan->vc.desc_free = pl08x_desc_free; + vchan_init(&chan->vc, dmadev); } dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", i, slave ? 
"slave" : "memcpy"); @@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, chan.device_node) { - list_del(&chan->chan.device_node); + next, &dmadev->channels, vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); kfree(chan); } } @@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } - pm_runtime_set_active(&adev->dev); - pm_runtime_enable(&adev->dev); - /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } - spin_lock_init(&pl08x->lock); - pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; @@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); - ch->signal = -1; /* * Nomadik variants can have channels that are locked @@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); - pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2026,9 +2034,6 @@ out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: - pm_runtime_put(&adev->dev); - pm_runtime_disable(&adev->dev); - kfree(pl08x); out_no_pl08x: amba_release_regions(adev); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index fcfeb3cd8d31..5084975d793c 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -172,7 +172,8 @@ struct imxdma_engine { struct device_dma_parameters dma_parms; struct dma_device dma_device; void __iomem *base; - struct clk *dma_clk; + struct clk *dma_ahb; + struct clk *dma_ipg; spinlock_t lock; struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; @@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; } - imxdma->dma_clk = clk_get(NULL, "dma"); - if (IS_ERR(imxdma->dma_clk)) - return PTR_ERR(imxdma->dma_clk); - clk_enable(imxdma->dma_clk); + imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imxdma->dma_ipg)) { + ret = PTR_ERR(imxdma->dma_ipg); + goto err_clk; + } + + imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(imxdma->dma_ahb)) { + ret = PTR_ERR(imxdma->dma_ahb); + goto err_clk; + } + + clk_prepare_enable(imxdma->dma_ipg); + 
clk_prepare_enable(imxdma->dma_ahb); /* reset DMA module */ imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); @@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev) ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); - kfree(imxdma); - return ret; + goto err_enable; } ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); - kfree(imxdma); - return ret; + goto err_enable; } } @@ -1094,7 +1103,10 @@ err_init: free_irq(MX1_DMA_INT, NULL); free_irq(MX1_DMA_ERR, NULL); } - +err_enable: + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); +err_clk: kfree(imxdma); return ret; } @@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev) free_irq(MX1_DMA_ERR, NULL); } - kfree(imxdma); + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); + kfree(imxdma); return 0; } diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c new file mode 100644 index 000000000000..ae0561826137 --- /dev/null +++ b/drivers/dma/omap-dma.c @@ -0,0 +1,669 @@ +/* + * OMAP DMAengine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/omap-dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" +#include <plat/dma.h> + +struct omap_dmadev { + struct dma_device ddev; + spinlock_t lock; + struct tasklet_struct task; + struct list_head pending; +}; + +struct omap_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + unsigned dma_sig; + bool cyclic; + + int dma_ch; + struct omap_desc *desc; + unsigned sgidx; +}; + +struct omap_sg { + dma_addr_t addr; + uint32_t en; /* number of elements (24-bit) */ + uint32_t fn; /* number of frames (16-bit) */ +}; + +struct omap_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + dma_addr_t dev_addr; + + int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ + uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ + uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ + uint8_t periph_port; /* Peripheral port */ + + unsigned sglen; + struct omap_sg sg[0]; +}; + +static const unsigned es_bytes[] = { + [OMAP_DMA_DATA_TYPE_S8] = 1, + [OMAP_DMA_DATA_TYPE_S16] = 2, + [OMAP_DMA_DATA_TYPE_S32] = 4, +}; + +static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) +{ + return container_of(d, struct omap_dmadev, ddev); +} + +static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct omap_chan, vc.chan); +} + +static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct omap_desc, vd.tx); +} + +static void omap_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct omap_desc, vd)); +} + +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, + unsigned idx) +{ + struct omap_sg *sg = d->sg + idx; + + if (d->dir == DMA_DEV_TO_MEM) + 
omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + else + omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + + omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, + d->sync_mode, c->dma_sig, d->sync_type); + + omap_start_dma(c->dma_ch); +} + +static void omap_dma_start_desc(struct omap_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct omap_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_omap_dma_desc(&vd->tx); + c->sgidx = 0; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_src_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + else + omap_set_dma_dest_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + + omap_dma_start_sg(c, d, 0); +} + +static void omap_dma_callback(int ch, u16 status, void *data) +{ + struct omap_chan *c = data; + struct omap_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + d = c->desc; + if (d) { + if (!c->cyclic) { + if (++c->sgidx < d->sglen) { + omap_dma_start_sg(c, d, c->sgidx); + } else { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } + } else { + vchan_cyclic_callback(&d->vd); + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +/* + * This callback schedules all pending channels. We could be more + * clever here by postponing allocation of the real DMA channels to + * this point, and freeing them when our virtual channel becomes idle. + * + * We would then need to deal with 'all channels in-use' + */ +static void omap_dma_sched(unsigned long data) +{ + struct omap_dmadev *d = (struct omap_dmadev *)data; + LIST_HEAD(head); + + spin_lock_irq(&d->lock); + list_splice_tail_init(&d->pending, &head); + spin_unlock_irq(&d->lock); + + while (!list_empty(&head)) { + struct omap_chan *c = list_first_entry(&head, + struct omap_chan, node); + + spin_lock_irq(&c->vc.lock); + list_del_init(&c->node); + omap_dma_start_desc(c); + spin_unlock_irq(&c->vc.lock); + } +} + +static int omap_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + + return omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); +} + +static void omap_dma_free_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + omap_free_dma(c->dma_ch); + + dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); +} + +static size_t omap_dma_sg_size(struct omap_sg *sg) +{ + return sg->en * sg->fn; +} + +static size_t omap_dma_desc_size(struct omap_desc *d) +{ + unsigned i; + size_t size; + + for (size = i = 0; i < d->sglen; i++) + size += omap_dma_sg_size(&d->sg[i]); + + return size * es_bytes[d->es]; +} + +static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) +{ + unsigned i; + size_t size, es_size = es_bytes[d->es]; + + for (size = i = 0; i < d->sglen; i++) { + size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; + + if (size) + size += this_size; + else if (addr >= d->sg[i].addr && + addr < d->sg[i].addr + this_size) + size += d->sg[i].addr + this_size - addr; + } + return size; +} + +static enum dma_status omap_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct omap_chan *c = 
to_omap_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_SUCCESS || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct omap_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = omap_get_dma_src_pos(c->dma_ch); + else if (d->dir == DMA_DEV_TO_MEM) + pos = omap_get_dma_dst_pos(c->dma_ch); + else + pos = 0; + + txstate->residue = omap_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void omap_dma_issue_pending(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, + enum dma_transfer_direction dir, unsigned long tx_flags, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned i, j = 0, es, en, frame_bytes, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_FRAME; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_TIPB; + + /* + * Build our scatterlist entries: each contains the address, + * the number of elements (EN) in each frame, and the number of + * frames (FN). Number of bytes for this entry = ES * EN * FN. + * + * Burst size translates to number of elements with frame sync. + * Note: DMA engine defines burst to be the number of dev-width + * transfers. 
+ */ + en = burst; + frame_bytes = es_bytes[es] * en; + for_each_sg(sgl, sgent, sglen, i) { + d->sg[j].addr = sg_dma_address(sgent); + d->sg[j].en = en; + d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; + j++; + } + + d->sglen = j; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned es, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->fi = burst; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_PACKET; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_MPUI; + d->sg[0].addr = buf_addr; + d->sg[0].en = period_len / es_bytes[es]; + d->sg[0].fn = buf_len / period_len; + d->sglen = 1; + + if (!c->cyclic) { + c->cyclic = true; + omap_dma_link_lch(c->dma_ch, c->dma_ch); + omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); + omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); + } + + if (!cpu_class_is_omap1()) { + omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + } + + return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); +} + +static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) +{ + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + memcpy(&c->cfg, cfg, sizeof(c->cfg)); + + return 0; +} + +static int omap_dma_terminate_all(struct omap_chan *c) +{ + struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after omap_stop_dma() returns (even if it does, it will see + * c->desc is NULL and exit.) 
+ */ + if (c->desc) { + c->desc = NULL; + omap_stop_dma(c->dma_ch); + } + + if (c->cyclic) { + c->cyclic = false; + omap_dma_unlink_lch(c->dma_ch, c->dma_ch); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int omap_dma_pause(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_resume(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + int ret; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); + break; + + case DMA_TERMINATE_ALL: + ret = omap_dma_terminate_all(c); + break; + + case DMA_PAUSE: + ret = omap_dma_pause(c); + break; + + case DMA_RESUME: + ret = omap_dma_resume(c); + break; + + default: + ret = -ENXIO; + break; + } + + return ret; +} + +static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) +{ + struct omap_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->dma_sig = dma_sig; + c->vc.desc_free = omap_dma_desc_free; + vchan_init(&c->vc, &od->ddev); + INIT_LIST_HEAD(&c->node); + + od->ddev.chancnt++; + + return 0; +} + +static void omap_dma_free(struct omap_dmadev *od) +{ + tasklet_kill(&od->task); + while (!list_empty(&od->ddev.channels)) { + struct omap_chan *c = list_first_entry(&od->ddev.channels, + struct omap_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + kfree(c); + } + kfree(od); +} + +static int omap_dma_probe(struct platform_device *pdev) +{ + struct omap_dmadev *od; + int rc, i; + + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; + od->ddev.device_tx_status = omap_dma_tx_status; + od->ddev.device_issue_pending = omap_dma_issue_pending; + od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; + od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_control = omap_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + INIT_LIST_HEAD(&od->pending); + spin_lock_init(&od->lock); + + tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); + + for (i = 0; i < 127; i++) { + rc = omap_dma_chan_init(od, i); + if (rc) { + omap_dma_free(od); + return rc; + } + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", + rc); + omap_dma_free(od); + } else { + platform_set_drvdata(pdev, od); + } + + dev_info(&pdev->dev, "OMAP DMA engine driver\n"); + + return rc; +} + +static int omap_dma_remove(struct platform_device *pdev) +{ + struct omap_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + + return 0; +} + +static struct platform_driver omap_dma_driver = { + .probe = omap_dma_probe, + .remove = omap_dma_remove, + .driver = { + .name = "omap-dma-engine", + .owner = THIS_MODULE, + }, +}; + +bool omap_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &omap_dma_driver.driver) { + struct 
omap_chan *c = to_omap_dma_chan(chan); + unsigned req = *(unsigned *)param; + + return req == c->dma_sig; + } + return false; +} +EXPORT_SYMBOL_GPL(omap_dma_filter_fn); + +static struct platform_device *pdev; + +static const struct platform_device_info omap_dma_dev_info = { + .name = "omap-dma-engine", + .id = -1, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int omap_dma_init(void) +{ + int rc = platform_driver_register(&omap_dma_driver); + + if (rc == 0) { + pdev = platform_device_register_full(&omap_dma_dev_info); + if (IS_ERR(pdev)) { + platform_driver_unregister(&omap_dma_driver); + rc = PTR_ERR(pdev); + } + } + return rc; +} +subsys_initcall(omap_dma_init); + +static void __exit omap_dma_exit(void) +{ + platform_device_unregister(pdev); + platform_driver_unregister(&omap_dma_driver); +} +module_exit(omap_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ec78ccef9132..f5a73606217e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -21,6 +21,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> +#include "virt-dma.h" + #define NR_PHY_CHAN 6 #define DMA_ALIGN 3 #define DMA_MAX_SIZE 0x1fff @@ -72,12 +74,13 @@ struct sa11x0_dma_sg { }; struct sa11x0_dma_desc { - struct dma_async_tx_descriptor tx; + struct virt_dma_desc vd; + u32 ddar; size_t size; + unsigned period; + bool cyclic; - /* maybe protected by c->lock */ - struct list_head node; unsigned sglen; struct sa11x0_dma_sg sg[0]; }; @@ -85,15 +88,11 @@ struct sa11x0_dma_desc { struct sa11x0_dma_phy; struct sa11x0_dma_chan { - struct dma_chan chan; - spinlock_t lock; - dma_cookie_t lc; + struct virt_dma_chan vc; - /* protected by c->lock */ + /* protected by c->vc.lock */ struct sa11x0_dma_phy *phy; enum dma_status status; - struct list_head desc_submitted; - struct list_head desc_issued; /* protected by d->lock */ struct list_head node; @@ -109,7 +108,7 @@ struct sa11x0_dma_phy { struct sa11x0_dma_chan *vchan; - /* Protected by c->lock */ + /* Protected by c->vc.lock */ unsigned sg_load; struct sa11x0_dma_desc *txd_load; unsigned sg_done; @@ -127,13 +126,12 @@ struct sa11x0_dma_dev { spinlock_t lock; struct tasklet_struct task; struct list_head chan_pending; - struct list_head desc_complete; struct sa11x0_dma_phy phy[NR_PHY_CHAN]; }; static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) { - return container_of(chan, struct sa11x0_dma_chan, chan); + return container_of(chan, struct sa11x0_dma_chan, vc.chan); } static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) @@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) return container_of(dmadev, struct sa11x0_dma_dev, slave); } -static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) +static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) { - return container_of(tx, struct sa11x0_dma_desc, tx); + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + + return vd ? 
container_of(vd, struct sa11x0_dma_desc, vd) : NULL; } -static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) +static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) { - if (list_empty(&c->desc_issued)) - return NULL; - - return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); + kfree(container_of(vd, struct sa11x0_dma_desc, vd)); } static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) { - list_del(&txd->node); + list_del(&txd->vd.node); p->txd_load = txd; p->sg_load = 0; dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", - p->num, txd, txd->tx.cookie, txd->ddar); + p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); } static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, @@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, return; if (p->sg_load == txd->sglen) { - struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); + if (!txd->cyclic) { + struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); - /* - * We have reached the end of the current descriptor. - * Peek at the next descriptor, and if compatible with - * the current, start processing it. - */ - if (txn && txn->ddar == txd->ddar) { - txd = txn; - sa11x0_dma_start_desc(p, txn); + /* + * We have reached the end of the current descriptor. + * Peek at the next descriptor, and if compatible with + * the current, start processing it. + */ + if (txn && txn->ddar == txd->ddar) { + txd = txn; + sa11x0_dma_start_desc(p, txn); + } else { + p->txd_load = NULL; + return; + } } else { - p->txd_load = NULL; - return; + /* Cyclic: reset back to beginning */ + p->sg_load = 0; } } @@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd = p->txd_done; if (++p->sg_done == txd->sglen) { - struct sa11x0_dma_dev *d = p->dev; - - dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", - p->num, p->txd_done, p->txd_done->tx.cookie); - - c->lc = txd->tx.cookie; + if (!txd->cyclic) { + vchan_cookie_complete(&txd->vd); - spin_lock(&d->lock); - list_add_tail(&txd->node, &d->desc_complete); - spin_unlock(&d->lock); + p->sg_done = 0; + p->txd_done = p->txd_load; - p->sg_done = 0; - p->txd_done = p->txd_load; + if (!p->txd_done) + tasklet_schedule(&p->dev->task); + } else { + if ((p->sg_done % txd->period) == 0) + vchan_cyclic_callback(&txd->vd); - tasklet_schedule(&d->task); + /* Cyclic: reset back to beginning */ + p->sg_done = 0; + } } sa11x0_dma_start_sg(p, c); @@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (c) { unsigned long flags; - spin_lock_irqsave(&c->lock, flags); + spin_lock_irqsave(&c->vc.lock, flags); /* * Now that we're holding the lock, check that the vchan * really is associated with this pchan before touching the @@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (dcsr & DCSR_DONEB) sa11x0_dma_complete(p, c); } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); } return IRQ_HANDLED; @@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg) struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; struct sa11x0_dma_phy *p; struct sa11x0_dma_chan *c; - struct sa11x0_dma_desc *txd, *txn; - LIST_HEAD(head); unsigned pch, pch_alloc = 0; dev_dbg(d->slave.dev, "tasklet enter\n"); - /* Get the completed tx descriptors */ - spin_lock_irq(&d->lock); - list_splice_init(&d->desc_complete, &head); - spin_unlock_irq(&d->lock); - - 
list_for_each_entry(txd, &head, node) { - c = to_sa11x0_dma_chan(txd->tx.chan); - - dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", - c, txd, txd->tx.cookie); - - spin_lock_irq(&c->lock); + list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { + spin_lock_irq(&c->vc.lock); p = c->phy; - if (p) { - if (!p->txd_done) - sa11x0_dma_start_txd(c); + if (p && !p->txd_done) { + sa11x0_dma_start_txd(c); if (!p->txd_done) { /* No current txd associated with this channel */ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); @@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) p->vchan = NULL; } } - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } spin_lock_irq(&d->lock); @@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) /* Mark this channel allocated */ p->vchan = c; - dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); + dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); } } spin_unlock_irq(&d->lock); @@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg) p = &d->phy[pch]; c = p->vchan; - spin_lock_irq(&c->lock); + spin_lock_irq(&c->vc.lock); c->phy = p; sa11x0_dma_start_txd(c); - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } } - /* Now free the completed tx descriptor, and call their callbacks */ - list_for_each_entry_safe(txd, txn, &head, node) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", - txd, txd->tx.cookie); - - kfree(txd); - - if (callback) - callback(callback_param); - } - dev_dbg(d->slave.dev, "tasklet exit\n"); } -static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) -{ - struct sa11x0_dma_desc *txd, *txn; - - list_for_each_entry_safe(txd, txn, head, node) { - dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); - kfree(txd); - } -} - static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) { return 0; @@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - LIST_HEAD(head); - spin_lock_irqsave(&c->lock, flags); - spin_lock(&d->lock); + spin_lock_irqsave(&d->lock, flags); list_del_init(&c->node); - spin_unlock(&d->lock); - - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&d->lock, flags); - sa11x0_dma_desc_free(d, &head); + vchan_free_chan_resources(&c->vc); } static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) @@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; - struct sa11x0_dma_desc *txd; - dma_cookie_t last_used, last_complete; + struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; - size_t bytes = 0; - - last_used = c->chan.cookie; - last_complete = c->lc; - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) { - dma_set_tx_state(state, last_complete, last_used, 0); + ret = dma_cookie_status(&c->vc.chan, cookie, state); + if (ret == DMA_SUCCESS) return ret; - } - spin_lock_irqsave(&c->lock, flags); + if (!state) + return c->status; + + spin_lock_irqsave(&c->vc.lock, flags); p = c->phy; - ret = 
c->status; - if (p) { - dma_addr_t addr = sa11x0_dma_pos(p); - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + /* + * If the cookie is on our issue queue, then the residue is + * its total size. + */ + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; + } else if (!p) { + state->residue = 0; + } else { + struct sa11x0_dma_desc *txd; + size_t bytes = 0; - txd = p->txd_done; + if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) + txd = p->txd_done; + else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) + txd = p->txd_load; + else + txd = NULL; + + ret = c->status; if (txd) { + dma_addr_t addr = sa11x0_dma_pos(p); unsigned i; + dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + for (i = 0; i < txd->sglen; i++) { dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", i, txd->sg[i].addr, txd->sg[i].len); @@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, bytes += txd->sg[i].len; } } - if (txd != p->txd_load && p->txd_load) - bytes += p->txd_load->size; - } - list_for_each_entry(txd, &c->desc_issued, node) { - bytes += txd->size; + state->residue = bytes; } - spin_unlock_irqrestore(&c->lock, flags); - - dma_set_tx_state(state, last_complete, last_used, bytes); + spin_unlock_irqrestore(&c->vc.lock, flags); - dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); return ret; } @@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan) struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &c->desc_issued); - if (!list_empty(&c->desc_issued)) { - spin_lock(&d->lock); - if (!c->phy && list_empty(&c->node)) { - list_add_tail(&c->node, &d->chan_pending); - tasklet_schedule(&d->task); - dev_dbg(d->slave.dev, "vchan %p: issued\n", c); + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + if (!c->phy) { + spin_lock(&d->lock); + if (list_empty(&c->node)) { + list_add_tail(&c->node, &d->chan_pending); + tasklet_schedule(&d->task); + dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); + } + spin_unlock(&d->lock); } - spin_unlock(&d->lock); } else - dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); - spin_unlock_irqrestore(&c->lock, flags); -} - -static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); - struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); - unsigned long flags; - - spin_lock_irqsave(&c->lock, flags); - c->chan.cookie += 1; - if (c->chan.cookie < 0) - c->chan.cookie = 1; - txd->tx.cookie = c->chan.cookie; - - list_add_tail(&txd->node, &c->desc_submitted); - spin_unlock_irqrestore(&c->lock, flags); - - dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", - c, txd, txd->tx.cookie); - - return txd->tx.cookie; + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); + spin_unlock_irqrestore(&c->vc.lock, flags); } static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( @@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( /* SA11x0 channels can only operate in their native direction */ if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", - c, c->ddar, dir); + &c->vc, c->ddar, dir); return NULL; } @@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", - c, addr); + &c->vc, addr); return NULL; } } txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); if (!txd) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); return NULL; } @@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( } while (len); } - dma_async_tx_descriptor_init(&txd->tx, &c->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = sa11x0_dma_tx_submit; txd->ddar = c->ddar; txd->size = size; txd->sglen = j; dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", - c, txd, txd->size, txd->sglen); + &c->vc, &txd->vd, txd->size, txd->sglen); - return &txd->tx; + return vchan_tx_prep(&c->vc, &txd->vd, flags); +} + +static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, + enum dma_transfer_direction dir, void *context) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_desc *txd; + unsigned i, j, k, sglen, sgperiod; + + /* SA11x0 channels can only operate in their native direction */ + if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { + dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", + &c->vc, c->ddar, dir); + return NULL; + } + + sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); + sglen = size * sgperiod / period; + + /* Do not allow zero-sized txds */ + if (sglen == 0) + return NULL; + + txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); + if (!txd) { + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); + return NULL; + } + + for (i = k = 0; i < size / period; i++) { + size_t tlen, len = period; + + for (j = 0; j < sgperiod; j++, k++) { + tlen = len; + + if (tlen > DMA_MAX_SIZE) { + unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); + tlen = (tlen / mult) & ~DMA_ALIGN; + } + + txd->sg[k].addr = addr; + txd->sg[k].len = tlen; + addr += tlen; + len -= tlen; + } + + WARN_ON(len != 0); + } + + WARN_ON(k != sglen); + + txd->ddar = c->ddar; + txd->size = size; + txd->sglen = sglen; + txd->cyclic = 1; + txd->period = sgperiod; + + return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) @@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c if (maxburst == 8) ddar |= DDAR_BS; - dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", - c, addr, width, maxburst); + dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", + &c->vc, addr, width, maxburst); c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; @@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); /* Clear 
the tx descriptor lists */ - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); p = c->phy; if (p) { - struct sa11x0_dma_desc *txd, *txn; - dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); /* vchan is assigned to a pchan - stop the channel */ writel(DCSR_RUN | DCSR_IE | @@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, DCSR_STRTB | DCSR_DONEB, p->base + DMA_DCSR_C); - list_for_each_entry_safe(txd, txn, &d->desc_complete, node) - if (txd->tx.chan == &c->chan) - list_move(&txd->node, &head); - if (p->txd_load) { if (p->txd_load != p->txd_done) - list_add_tail(&p->txd_load->node, &head); + list_add_tail(&p->txd_load->vd.node, &head); p->txd_load = NULL; } if (p->txd_done) { - list_add_tail(&p->txd_done->node, &head); + list_add_tail(&p->txd_done->vd.node, &head); p->txd_done = NULL; } c->phy = NULL; @@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); tasklet_schedule(&d->task); } - spin_unlock_irqrestore(&c->lock, flags); - sa11x0_dma_desc_free(d, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); ret = 0; break; case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_IN_PROGRESS) { c->status = DMA_PAUSED; @@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_PAUSED) { c->status = DMA_IN_PROGRESS; p = c->phy; if (p) { writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); - } else if (!list_empty(&c->desc_issued)) { + } else if (!list_empty(&c->vc.desc_issued)) { spin_lock(&d->lock); list_add_tail(&c->node, &d->chan_pending); spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; @@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, return -ENOMEM; } - c->chan.device = dmadev; c->status = DMA_IN_PROGRESS; c->ddar = chan_desc[i].ddar; c->name = chan_desc[i].name; - spin_lock_init(&c->lock); - INIT_LIST_HEAD(&c->desc_submitted); - INIT_LIST_HEAD(&c->desc_issued); INIT_LIST_HEAD(&c->node); - list_add_tail(&c->chan.device_node, &dmadev->channels); + + c->vc.desc_free = sa11x0_dma_free_desc; + vchan_init(&c->vc, dmadev); } return dma_async_device_register(dmadev); @@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev) { struct sa11x0_dma_chan *c, *cn; - list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { - list_del(&c->chan.device_node); + list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); kfree(c); } } @@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) spin_lock_init(&d->lock); INIT_LIST_HEAD(&d->chan_pending); - 
INIT_LIST_HEAD(&d->desc_complete); d->base = ioremap(res->start, resource_size(res)); if (!d->base) { @@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) } dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); if (ret) { dev_warn(d->slave.dev, "failed to register slave async device: %d\n", diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 27f5c781fd73..f4cd946d259d 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, new->mark = DESC_PREPARED; new->async_tx.flags = flags; new->direction = direction; + new->partial = 0; *len -= copy_size; if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) @@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, case DMA_TERMINATE_ALL: spin_lock_irqsave(&schan->chan_lock, flags); ops->halt_channel(schan); + + if (ops->get_partial && !list_empty(&schan->ld_queue)) { + /* Record partial transfer */ + struct shdma_desc *desc = list_first_entry(&schan->ld_queue, + struct shdma_desc, node); + desc->partial = ops->get_partial(schan, desc); + } + spin_unlock_irqrestore(&schan->chan_lock, flags); shdma_chan_ld_cleanup(schan, true); diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index 027c9be97654..f41bcc5267fd 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c @@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) return true; } +static size_t sh_dmae_get_partial(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + struct sh_dmae_desc *sh_desc = container_of(sdesc, + struct sh_dmae_desc, shdma_desc); + return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << + sh_chan->xmit_shift; +} + /* Called from error IRQ or NMI */ static bool sh_dmae_reset(struct sh_dmae_device *shdev) { @@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = { .start_xfer = sh_dmae_start_xfer, .embedded_desc = sh_dmae_embedded_desc, .chan_irq = sh_dmae_chan_irq, + .get_partial = sh_dmae_get_partial, }; static int __devinit sh_dmae_probe(struct platform_device *pdev) diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index d52dbc6c54ab..24acd711e032 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; + int ret; dma_cookie_init(&tdc->dma_chan); tdc->config_init = false; - return 0; + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) + dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); + return ret; } static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; @@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_del(&sg_req->node); kfree(sg_req); } + clk_disable_unprepare(tdma->dma_clk); } /* Tegra20 
specific DMA controller information */ @@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) } } + /* Enable clock before accessing registers */ + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) { + dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); + goto err_pm_disable; + } + /* Reset DMA controller */ tegra_periph_reset_assert(tdma->dma_clk); udelay(2); @@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + clk_disable_unprepare(tdma->dma_clk); + INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c new file mode 100644 index 000000000000..6f80432a3f0a --- /dev/null +++ b/drivers/dma/virt-dma.c @@ -0,0 +1,123 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_add_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
+ */ +static void vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd; + dma_async_tx_callback cb = NULL; + void *cb_data = NULL; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + } + spin_unlock_irq(&vc->lock); + + if (cb) + cb(cb_data); + + while (!list_empty(&head)) { + vd = list_first_entry(&head, struct virt_dma_desc, node); + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + + list_del(&vd->node); + + vc->desc_free(vd); + + if (cb) + cb(cb_data); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + while (!list_empty(head)) { + struct virt_dma_desc *vd = list_first_entry(head, + struct virt_dma_desc, node); + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + + tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h new file mode 100644 index 000000000000..85c19d63f9fb --- /dev/null +++ b/drivers/dma/virt-dma.h @@ -0,0 +1,152 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); + +/** + * vchan_tx_prep - prepare a descriptor + * vc: virtual channel allocating this descriptor + * vd: virtual descriptor to prepare + * tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); + vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, vd->tx.cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_cyclic_callback - report the completion of a period + * vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + if (list_empty(&vc->desc_issued)) + return NULL; + + return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * vc: virtual channel to get descriptors from + * head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, 
head); +} + +static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + vchan_get_all_descriptors(vc, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c index fe3db45fa83c..3cc152e690b0 100644 --- a/drivers/extcon/extcon_gpio.c +++ b/drivers/extcon/extcon_gpio.c @@ -107,7 +107,8 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name); + ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, + pdev->name); if (ret < 0) goto err; diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index 150d9768811d..ae37181798b3 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -266,7 +266,7 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) return 0; } -static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p) +static void em_gio_irq_domain_cleanup(struct em_gio_priv *p) { struct gpio_em_config *pdata = p->pdev->dev.platform_data; diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c index a1c8754f52cf..202a99207b7d 100644 --- a/drivers/gpio/gpio-langwell.c +++ b/drivers/gpio/gpio-langwell.c @@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; - int retval = 0; + int retval; int ngpio = id->driver_data; retval = pci_enable_device(pdev); @@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, base = ioremap_nocache(start, len); if (!base) { dev_err(&pdev->dev, "error mapping bar1\n"); + retval = -EFAULT; goto err3; } gpio_base = *((u32 *)base + 1); @@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) + if (!lnw->domain) { + retval = -ENOMEM; goto err3; + } lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 71a838f44501..b38986285868 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset) if (offset < 20) return INTEL_MSIC_GPIO0HV0CTLO - offset + 16; - return INTEL_MSIC_GPIO1HV0CTLO + offset + 20; + return INTEL_MSIC_GPIO1HV0CTLO - offset + 20; } static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 4db460b6ecf7..80f44bb64a87 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev) goto out_iounmap; port->bgc.gc.to_irq = mxc_gpio_to_irq; - port->bgc.gc.base = pdev->id * 32; - port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); - port->bgc.data = port->bgc.read_reg(port->bgc.reg_set); + port->bgc.gc.base = (pdev->id < 0) ? 
of_alias_get_id(np, "gpio") * 32 : + pdev->id * 32; err = gpiochip_add(&port->bgc.gc); if (err) diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 58a6a63a6ece..9cac88a65f78 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -62,6 +62,7 @@ int pxa_last_gpio; #ifdef CONFIG_OF static struct irq_domain *domain; +static struct device_node *pxa_gpio_of_node; #endif struct pxa_gpio_chip { @@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) (value ? GPSR_OFFSET : GPCR_OFFSET)); } +#ifdef CONFIG_OF_GPIO +static int pxa_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + if (gpiospec->args[0] > pxa_last_gpio) + return -EINVAL; + + if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[1]; + + return gpiospec->args[0] % 32; +} +#endif + static int __devinit pxa_init_gpio_chip(int gpio_end, int (*set_wake)(unsigned int, unsigned int)) { @@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end, c->get = pxa_gpio_get; c->set = pxa_gpio_set; c->to_irq = pxa_gpio_to_irq; +#ifdef CONFIG_OF_GPIO + c->of_node = pxa_gpio_of_node; + c->of_xlate = pxa_gpio_of_xlate; + c->of_gpio_n_cells = 2; +#endif /* number of GPIOs on last bank may be less than 32 */ c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32; @@ -488,6 +512,7 @@ static int pxa_gpio_nums(void) return count; } +#ifdef CONFIG_OF static struct of_device_id pxa_gpio_dt_ids[] = { { .compatible = "mrvl,pxa-gpio" }, { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO }, @@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq, const struct irq_domain_ops pxa_irq_domain_ops = { .map = pxa_irq_domain_map, + .xlate = irq_domain_xlate_twocell, }; -#ifdef CONFIG_OF static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) { int ret, nr_banks, nr_gpios, irq_base; @@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) } domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0, &pxa_irq_domain_ops, NULL); + pxa_gpio_of_node = np; return 0; err: iounmap(gpio_reg_base); @@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = { .probe = pxa_gpio_probe, .driver = { .name = "pxa-gpio", - .of_match_table = pxa_gpio_dt_ids, + .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, }; diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 92f7b2bb79d4..ba126cc04073 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { }, }, { .chip = { - .base = EXYNOS5_GPC4(0), - .ngpio = EXYNOS5_GPIO_C4_NR, - .label = "GPC4", - }, - }, { - .chip = { .base = EXYNOS5_GPD0(0), .ngpio = EXYNOS5_GPIO_D0_NR, .label = "GPD0", @@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { .label = "GPY6", }, }, { + .chip = { + .base = EXYNOS5_GPC4(0), + .ngpio = EXYNOS5_GPIO_C4_NR, + .label = "GPC4", + }, + }, { .config = &samsung_gpio_cfgs[9], .irq_base = IRQ_EINT(0), .chip = { @@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void) } /* need to set base address for gpc4 */ - exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; + exynos5_gpios_1[20].base = gpio_base1 + 0x2E0; /* need to set base address for gpx */ chip = &exynos5_gpios_1[21]; diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index 424dce8e3f30..8707d4572a06 100644 --- 
a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev) break; default: - return -ENODEV; + err = -ENODEV; + goto err_sch_gpio_core; } sch_gpio_core.dev = &pdev->dev; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 23120c00a881..90e28081712d 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -22,6 +22,7 @@ menuconfig DRM config DRM_USB tristate depends on DRM + depends on USB_ARCH_HAS_HCD select USB config DRM_KMS_HELPER diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 66d4a28ad5a2..0303935d10e2 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name, { const struct firmware *fw; struct platform_device *pdev; - u8 *fwdata = NULL, *edid; + u8 *fwdata = NULL, *edid, *new_edid; int fwsize, expected; int builtin = 0, err = 0; int i, valid_extensions = 0; @@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name, "\"%s\" for connector \"%s\"\n", valid_extensions, edid[0x7e], name, connector_name); edid[0x7e] = valid_extensions; - edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, + new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); - if (edid == NULL) { + if (new_edid == NULL) { err = -ENOMEM; + kfree(edid); goto relfw_out; } + edid = new_edid; } connector->display_info.raw_edid = edid; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ed22612bc847..a24ffbe97c01 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ + INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ + INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ + INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ + INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ + INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ + INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 
server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ + INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index da8b01fb1bf8..a9d58d72bb4d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring, struct drm_i915_file_private *file_priv = NULL; struct i915_hw_context *to; struct drm_i915_gem_object *from_obj = ring->last_context_obj; - int ret; if (dev_priv->hw_contexts_disabled) return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5af631e788c8..ff2819ea0813 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj = to_intel_bo(target_obj); target_offset = target_i915_obj->gtt_offset; + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. */ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ @@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, io_mapping_unmap_atomic(reloc_page); } - /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and - * pipe_control writes because the gpu doesn't properly redirect them - * through the ppgtt for non_secure batchbuffers. 
*/ - if (unlikely(IS_GEN6(dev) && - reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - !target_i915_obj->has_global_gtt_mapping)) { - i915_gem_gtt_bind_object(target_i915_obj, - target_i915_obj->cache_level); - } - /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9fd25a435536..ee9b68f6bc36 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -361,7 +361,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.gtt->needs_dmar) + /* don't map imported dma buf objects */ + if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table) return intel_gtt_map_memory(obj->pages, obj->base.size >> PAGE_SHIFT, &obj->sg_list, diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2f5388af8df9..7631807a2788 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,7 @@ #include "intel_drv.h" #include "i915_drv.h" +#ifdef CONFIG_PM static u32 calc_residency(struct drm_device *dev, const u32 reg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev) device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); } +#else +void i915_setup_sysfs(struct drm_device *dev) +{ + return; +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + return; +} +#endif /* CONFIG_PM */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f6159765f1eb..a69a3d0d3acf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, unsigned long bestppm, ppm, absppm; int dotclk, flag; + flag = 0; dotclk = target * 1000; bestppm = 1000000; ppm = absppm = 0; @@ -3753,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, continue; } - if (intel_encoder->type == INTEL_OUTPUT_EDP) { - /* Use VBT settings if we have an eDP panel */ - unsigned int edp_bpc = dev_priv->edp.bpp / 3; - - if (edp_bpc < display_bpc) { - DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); - display_bpc = edp_bpc; - } - continue; - } - /* Not one of the known troublemakers, check the EDID */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0a56b9ab0f58..a6c426afaa7a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp) WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(dev_priv); - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); + /* We need to switch off panel power _and_ force vdd, for otherwise some + * panels get very unhappy and cease to work. 
*/ + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + intel_dp->want_panel_vdd = false; + ironlake_wait_panel_off(intel_dp); } @@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) /* Switching the panel off requires vdd. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, mode); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); if (is_cpu_edp(intel_dp)) ironlake_edp_pll_off(encoder); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 84353559441c..132ab511b90c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -46,15 +46,16 @@ }) #define wait_for_atomic_us(COND, US) ({ \ - int i, ret__ = -ETIMEDOUT; \ - for (i = 0; i < (US); i++) { \ - if ((COND)) { \ - ret__ = 0; \ - break; \ - } \ - udelay(1); \ - } \ - ret__; \ + unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + cpu_relax(); \ + } \ + ret__; \ }) #define wait_for(COND, MS) _wait_for(COND, MS, 1) @@ -380,7 +381,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); -extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1991a4408cf9..b9755f6378d8 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev) bus->dev_priv = dev_priv; bus->adapter.algo = &gmbus_algorithm; - ret = i2c_add_adapter(&bus->adapter); - if (ret) - goto err; /* By default use a conservative clock rate */ bus->reg0 = port | GMBUS_RATE_100KHZ; @@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev) bus->force_bit = true; intel_gpio_setup(bus, port); + + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; } intel_i2c_reset(dev_priv->dev); @@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (dev_priv->gmbus == NULL) - return; - for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 10c7d39034e1..3df4f5fa892a 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -213,7 +213,7 @@ static u32 
intel_panel_compute_brightness(struct drm_device *dev, u32 val) return val; } -u32 intel_panel_get_backlight(struct drm_device *dev) +static u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; @@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - dev_priv->backlight_enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); - if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; @@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev, * we don't track the backlight dpms state, hence check whether * we have to do anything first. */ if (tmp & BLM_PWM_ENABLE) - return; + goto set_level; if (dev_priv->num_pipe == 3) tmp &= ~BLM_PIPE_SELECT_IVB; @@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev, I915_WRITE(BLC_PWM_PCH_CTL1, tmp); } } + +set_level: + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. + */ + dev_priv->backlight_enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 94aabcaa3a67..58c07cdafb7e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3963,6 +3963,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(FORCEWAKE); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -3983,6 +3984,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); + POSTING_READ(FORCEWAKE_MT); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -4018,14 +4020,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE_MT); gen6_gt_check_fifodbg(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index bf0195a96d53..e2a73b38abe9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, * number of bits based on the write domains has little performance * impact. 
*/ - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - /* - * Ensure that any following seqno writes only happen when the render - * cache is indeed flushed (but only if the caller actually wants that). - */ - if (flush_domains) + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* + * Ensure that any following seqno writes only happen + * when the render cache is indeed flushed. + */ flags |= PIPE_CONTROL_CS_STALL; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + } - ret = intel_ring_begin(ring, 6); + ret = intel_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, 0); /* lower dword */ - intel_ring_emit(ring, 0); /* uppwer dword */ - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, 0); intel_ring_advance(ring); return 0; @@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - /* Initialize the ring. */ - I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) } } + /* Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. */ + I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26a6a4d0d078..d172e9873131 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, struct i2c_msg *msgs; int i, ret = true; + /* Would be simpler to allocate both in one go ? 
*/ buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); if (!buf) return false; msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); - if (!msgs) + if (!msgs) { + kfree(buf); return false; + } intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a4d7c500c97b..b69642d5d850 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; - unsigned int testr, testn, testm, testo; + int testr, testn, testm, testo; unsigned int p, m, n; - unsigned int computed; + unsigned int computed, vco; int tmp; + const unsigned int m_div_val[] = { 1, 2, 4, 8 }; m = n = p = 0; vcomax = 1488000; @@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) if (delta == 0) break; for (testo = 5; testo < 33; testo++) { - computed = pllreffreq * (testn + 1) / + vco = pllreffreq * (testn + 1) / (testr + 1); - if (computed < vcomin) + if (vco < vcomin) continue; - if (computed > vcomax) + if (vco > vcomax) continue; + computed = vco / (m_div_val[testm] * (testo + 1)); if (computed > clock) tmpdelta = computed - clock; else diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index fc841e87b343..26ebffebe710 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -211,11 +211,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } -static int nouveau_dsm_init(void) -{ - return 0; -} - static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - intel vendor ID means Integrated */ @@ -232,7 +227,6 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev) static struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = nouveau_dsm_power_state, - .init = nouveau_dsm_init, .get_client_id = nouveau_dsm_get_client_id, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 77e564667b5c..240cf962c999 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev) } break; case 6: /* NV50- DP AUX */ - port->drive = entry[0]; + port->drive = entry[0] & 0x0f; port->sense = port->drive; port->adapter.algo = &nouveau_dp_i2c_algo; break; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 1cdfd6e757ce..1866dbb49979 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -731,7 +731,6 @@ nouveau_card_init(struct drm_device *dev) case 0xa3: case 0xa5: case 0xa8: - case 0xaf: nva3_copy_create(dev); break; } diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c index cc82d799fc3b..c564c5e4c30a 100644 --- a/drivers/gpu/drm/nouveau/nv84_fifo.c +++ b/drivers/gpu/drm/nouveau/nv84_fifo.c @@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; + u32 save; /* remove channel from playlist, will context switch if active */ spin_lock_irqsave(&dev_priv->context_switch_lock, 
flags); nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); nv50_fifo_playlist_update(dev); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell any engines on this channel to unload their contexts */ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); + nv_wr32(dev, 0x002520, save); + nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); @@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv84_fifo_priv *priv = nv_engine(dev, engine); int i; + u32 save; /* set playlist length to zero, fifo will unload context */ nv_wr32(dev, 0x0032ec, 0); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell all connected engines to unload their contexts */ for (i = 0; i < priv->base.channels; i++) { struct nouveau_channel *chan = dev_priv->channels.ptr[i]; @@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) } } + nv_wr32(dev, 0x002520, save); nv_wr32(dev, 0x002140, 0); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c index 7c95c44e2887..4e712b10ebdb 100644 --- a/drivers/gpu/drm/nouveau/nvc0_pm.c +++ b/drivers/gpu/drm/nouveau/nvc0_pm.c @@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info) nouveau_mem_exec(&exec, info->perflvl); if (dev_priv->chipset < 0xd0) - nv_wr32(dev, 0x611200, 0x00003300); + nv_wr32(dev, 0x611200, 0x00003330); else nv_wr32(dev, 0x62c000, 0x03030300); } diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index d0d60e1e7f95..dac525b2994e 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c @@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ch = EVO_CURS(nv_crtc->index); - evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x); + evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); evo_piow(crtc->dev, ch, 0x0080, 0x00000000); return 0; } diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c index 1855ecbd843b..e98d144e6eb9 100644 --- a/drivers/gpu/drm/nouveau/nve0_fifo.c +++ b/drivers/gpu/drm/nouveau/nve0_fifo.c @@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit) printk(" on channel 0x%010llx\n", (u64)inst << 12); } +static int +nve0_fifo_page_flip(struct drm_device *dev, u32 chid) +{ + struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (likely(chid >= 0 && chid < priv->base.channels)) { + chan = dev_priv->channels.ptr[chid]; + if (likely(chan)) + ret = nouveau_finish_page_flip(chan, NULL); + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} + static void nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) { @@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; u32 subc = (addr & 0x00070000); u32 mthd = (addr & 0x00003ffc); + u32 show = stat; + + if (stat & 0x00200000) { + if (mthd == 0x0054) { + if (!nve0_fifo_page_flip(dev, 
chid)) + show &= ~0x00200000; + } + } - NV_INFO(dev, "PSUBFIFO %d:", unit); - nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat); - NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", - unit, chid, subc, mthd, data); + if (show) { + NV_INFO(dev, "PFIFO%d:", unit); + nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); + NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, subc, mthd, data); + } nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 9e6f76fec527..c6fcb5b86a45 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -259,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); /* disable crtc pair power gating before programming */ - if (ASIC_IS_DCE6(rdev)) + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) @@ -279,7 +279,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; /* power gating is per-pair */ - if (ASIC_IS_DCE6(rdev)) { + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { struct drm_crtc *other_crtc; struct radeon_crtc *other_radeon_crtc; list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { @@ -1531,12 +1531,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * crtc virtual pixel clock. */ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { - if (ASIC_IS_DCE5(rdev)) - return ATOM_DCPLL; + if (rdev->clock.dp_extclk) + return ATOM_PPLL_INVALID; else if (ASIC_IS_DCE6(rdev)) return ATOM_PPLL0; - else if (rdev->clock.dp_extclk) - return ATOM_PPLL_INVALID; + else if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; } } } @@ -1635,18 +1635,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + radeon_crtc->in_mode_set = true; /* pick pll */ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); + /* disable crtc pair power gating before programming */ + if (ASIC_IS_DCE6(rdev)) + atombios_powergate_crtc(crtc, ATOM_DISABLE); + atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); + radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e585a3b947eb..e93b80a6d4e9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { - save->vga_control[0] = RREG32(D1VGA_CONTROL); - save->vga_control[1] = RREG32(D2VGA_CONTROL); save->vga_render_control = RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - save->crtc_control[0] 
= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); - save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (rdev->num_crtc >= 4) { - save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); - save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - } - if (rdev->num_crtc >= 6) { - save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); - save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); - } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); @@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s /* Unlock host access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(D1VGA_CONTROL, save->vga_control[0]); - WREG32(D2VGA_CONTROL, save->vga_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); - WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); - WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); - } - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); - } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.evergreen.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.evergreen.tile_config |= 1 << 4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ 
rdev->config.evergreen.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.evergreen.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.evergreen.tile_config |= 2 << 4; + break; + } } rdev->config.evergreen.tile_config |= 0 << 8; rdev->config.evergreen.tile_config |= diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index c16554122ccd..e44a62a07fe3 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, case V_030000_SQ_TEX_DIM_1D_ARRAY: case V_030000_SQ_TEX_DIM_2D_ARRAY: depth = 1; + break; + case V_030000_SQ_TEX_DIM_2D_MSAA: + case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA: + surf.nsamples = 1 << llevel; + llevel = 0; + depth = 1; + break; case V_030000_SQ_TEX_DIM_3D: break; default: @@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) if (track->db_dirty) { /* Check stencil buffer */ - if (G_028800_STENCIL_ENABLE(track->db_depth_control)) { + if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID && + G_028800_STENCIL_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_stencil(p); if (r) return r; } /* Check depth buffer */ - if (G_028800_Z_ENABLE(track->db_depth_control)) { + if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID && + G_028800_Z_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_depth(p); if (r) return r; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index d3bd098e4e19..79347855d9bf 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1277,6 +1277,8 @@ #define S_028044_FORMAT(x) (((x) & 0x1) << 0) #define G_028044_FORMAT(x) (((x) >> 0) & 0x1) #define C_028044_FORMAT 0xFFFFFFFE +#define V_028044_STENCIL_INVALID 0 +#define V_028044_STENCIL_8 1 #define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 9945d86d9001..853800e8582f 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.cayman.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.cayman.tile_config |= 1 << 4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.cayman.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.cayman.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.cayman.tile_config |= 2 << 4; + break; + } } rdev->config.cayman.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 637280f541a3..d79c639ae739 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); } } + +/** + * r600_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (R6xx-cayman). + * Returns the 64 bit clock counter snapshot. 
+ */ +uint64_t r600_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index ca87f7afaf23..3dab49cb1d4a 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -764,8 +764,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) } /* Check depth buffer */ - if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) || - G_028800_Z_ENABLE(track->db_depth_control))) { + if (track->db_dirty && + G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && + (G_028800_STENCIL_ENABLE(track->db_depth_control) || + G_028800_Z_ENABLE(track->db_depth_control))) { r = r600_cs_track_validate_db(p); if (r) return r; @@ -1557,13 +1559,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, u32 tiling_flags) { struct r600_cs_track *track = p->track; - u32 nfaces, llevel, blevel, w0, h0, d0; - u32 word0, word1, l0_size, mipmap_size, word2, word3; + u32 dim, nfaces, llevel, blevel, w0, h0, d0; + u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; u32 height_align, pitch, pitch_align, depth_align; - u32 array, barray, larray; + u32 barray, larray; u64 base_align; struct array_mode_checker array_check; u32 format; + bool is_array; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) @@ -1581,12 +1584,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } word1 = radeon_get_ib_value(p, idx + 1); + word2 = radeon_get_ib_value(p, idx + 2) << 8; + word3 = radeon_get_ib_value(p, idx + 3) << 8; + word4 = radeon_get_ib_value(p, idx + 4); + word5 = radeon_get_ib_value(p, idx + 5); + dim = G_038000_DIM(word0); w0 = G_038000_TEX_WIDTH(word0) + 1; + pitch = (G_038000_PITCH(word0) + 1) * 8; h0 = G_038004_TEX_HEIGHT(word1) + 1; d0 = G_038004_TEX_DEPTH(word1); + format = G_038004_DATA_FORMAT(word1); + blevel = G_038010_BASE_LEVEL(word4); + llevel = G_038014_LAST_LEVEL(word5); + /* pitch in texels */ + array_check.array_mode = G_038000_TILE_MODE(word0); + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = 1; + array_check.blocksize = r600_fmt_get_blocksize(format); nfaces = 1; - array = 0; - switch (G_038000_DIM(word0)) { + is_array = false; + switch (dim) { case V_038000_SQ_TEX_DIM_1D: case V_038000_SQ_TEX_DIM_2D: case V_038000_SQ_TEX_DIM_3D: @@ -1599,29 +1618,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, break; case V_038000_SQ_TEX_DIM_1D_ARRAY: case V_038000_SQ_TEX_DIM_2D_ARRAY: - array = 1; + is_array = true; break; - case V_038000_SQ_TEX_DIM_2D_MSAA: case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: + is_array = true; + /* fall through */ + case V_038000_SQ_TEX_DIM_2D_MSAA: + array_check.nsamples = 1 << llevel; + llevel = 0; + break; default: dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } - format = G_038004_DATA_FORMAT(word1); if (!r600_fmt_is_valid_texture(format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, format); return -EINVAL; } - /* pitch in 
texels */ - pitch = (G_038000_PITCH(word0) + 1) * 8; - array_check.array_mode = G_038000_TILE_MODE(word0); - array_check.group_size = track->group_size; - array_check.nbanks = track->nbanks; - array_check.npipes = track->npipes; - array_check.nsamples = 1; - array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", @@ -1647,20 +1662,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } - word2 = radeon_get_ib_value(p, idx + 2) << 8; - word3 = radeon_get_ib_value(p, idx + 3) << 8; - - word0 = radeon_get_ib_value(p, idx + 4); - word1 = radeon_get_ib_value(p, idx + 5); - blevel = G_038010_BASE_LEVEL(word0); - llevel = G_038014_LAST_LEVEL(word1); if (blevel > llevel) { dev_warn(p->dev, "texture blevel %d > llevel %d\n", blevel, llevel); } - if (array == 1) { - barray = G_038014_BASE_ARRAY(word1); - larray = G_038014_LAST_ARRAY(word1); + if (is_array) { + barray = G_038014_BASE_ARRAY(word5); + larray = G_038014_LAST_ARRAY(word5); nfaces = larray - barray + 1; } @@ -1677,7 +1685,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ - word3 = radeon_get_ib_value(p, idx + 3) << 8; if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 4b116ae75fc2..fd328f4c3ea8 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -602,6 +602,9 @@ #define RLC_HB_WPTR 0x3f1c #define RLC_HB_WPTR_LSB_ADDR 0x3f14 #define RLC_HB_WPTR_MSB_ADDR 0x3f18 +#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38 +#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40 #define RLC_MC_CNTL 0x3f44 #define RLC_UCODE_CNTL 0x3f48 #define RLC_UCODE_ADDR 0x3f2c diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5431af292408..99304194a65c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -300,6 +300,7 @@ struct radeon_bo_va { uint64_t soffset; uint64_t eoffset; uint32_t flags; + struct radeon_fence *fence; bool valid; }; @@ -1533,6 +1534,7 @@ struct radeon_device { unsigned debugfs_count; /* virtual memory */ struct radeon_vm_manager vm_manager; + struct mutex gpu_clock_mutex; }; int radeon_device_init(struct radeon_device *rdev, @@ -1733,11 +1735,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) -#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) -#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) -#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) -#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) -#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) +#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) +#define 
radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) +#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) +#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) /* Common functions */ /* AGP */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f4af24310438..18c38d14c8cd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev); * rv515 */ struct rv515_mc_save { - u32 d1vga_control; - u32 d2vga_control; u32 vga_render_control; u32 vga_hdp_control; - u32 d1crtc_control; - u32 d2crtc_control; }; + int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_sa_bo *vb); int r600_mc_wait_for_idle(struct radeon_device *rdev); +uint64_t r600_get_gpu_clock(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 @@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev); * evergreen */ struct evergreen_mc_save { - u32 vga_control[6]; u32 vga_render_control; u32 vga_hdp_control; - u32 crtc_control[6]; }; + void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); @@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); +uint64_t si_get_gpu_clock(struct radeon_device *rdev); #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index b1e3820df363..f9c21f9d16bc 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; }; bool radeon_atombios_sideport_present(struct radeon_device *rdev) @@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); u16 data_offset, size; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + union igp_info *igp_info; u8 frev, crev; u16 percentage = 0, rate = 0; /* get any igp specific overrides */ if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { - igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + igp_info = (union igp_info *) (mode_info->atom_context->bios + data_offset); - switch (id) { - case ASIC_INTERNAL_SS_ON_TMDS: - percentage = le16_to_cpu(igp_info->usDVISSPercentage); - rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + switch (crev) { + case 6: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage); + rate = 
le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_HDMI: - percentage = le16_to_cpu(igp_info->usHDMISSPercentage); - rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + case 7: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_LVDS: - percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); - rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + default: + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); break; } if (percentage) diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 576f4f6919f2..f75247d42ffd 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde return i2c; } +static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct radeon_i2c_bus_rec i2c; + u16 offset; + u8 id, blocks, clk, data; + int i; + + i2c.valid = false; + + offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); + if (offset) { + blocks = RBIOS8(offset + 2); + for (i = 0; i < blocks; i++) { + id = RBIOS8(offset + 3 + (i * 5) + 0); + if (id == 136) { + clk = RBIOS8(offset + 3 + (i * 5) + 3); + data = RBIOS8(offset + 3 + (i * 5) + 4); + /* gpiopad */ + i2c = combios_setup_i2c_bus(rdev, DDC_MONID, + (1 << clk), (1 << data)); + break; + } + } + } + return i2c; +} + void radeon_combios_i2c_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) } else if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { - u16 offset; - u8 id, blocks, clk, data; - int i; - /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); - offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); - if (offset) { - blocks = RBIOS8(offset + 2); - for (i = 0; i < blocks; i++) { - id = RBIOS8(offset + 3 + (i * 5) + 0); - if (id == 136) { - clk = RBIOS8(offset + 3 + (i * 5) + 3); - data = RBIOS8(offset + 3 + (i * 5) + 4); - /* gpiopad */ - i2c = combios_setup_i2c_bus(rdev, DDC_MONID, - (1 << clk), (1 << data)); - rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); - break; - } - } - } + /* gpiopad */ + i2c = radeon_combios_get_i2c_info_from_table(rdev); + if (i2c.valid) + rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); } else if ((rdev->family == CHIP_R200) || (rdev->family >= CHIP_R300)) { /* 0x68 */ @@ -2321,7 +2333,10 @@ 
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) connector = (tmp >> 12) & 0xf; ddc_type = (tmp >> 8) & 0xf; - ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); + if (ddc_type == 5) + ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev); + else + ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); switch (connector) { case CONNECTOR_PROPRIETARY_LEGACY: diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 8a4c49ef0cc4..b4a0db24f4dd 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return 0; } +static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser, + struct radeon_fence *fence) +{ + struct radeon_fpriv *fpriv = parser->filp->driver_priv; + struct radeon_vm *vm = &fpriv->vm; + struct radeon_bo_list *lobj; + + if (parser->chunk_ib_idx == -1) { + return; + } + if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) { + return; + } + + list_for_each_entry(lobj, &parser->validated, tv.head) { + struct radeon_bo_va *bo_va; + struct radeon_bo *rbo = lobj->bo; + + bo_va = radeon_bo_va(rbo, vm); + radeon_fence_unref(&bo_va->fence); + bo_va->fence = radeon_fence_ref(fence); + } +} + /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (!error) + if (!error) { + /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */ + radeon_bo_vm_fence_va(parser, parser->ib.fence); ttm_eu_fence_buffer_objects(&parser->validated, parser->ib.fence); - else + } else { ttm_eu_backoff_reservation(&parser->validated); + } if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { @@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (parser->chunk_ib_idx == -1) return 0; - if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 711e95ad39bf..8794744cdf1a 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); - WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); @@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | - EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 742af8244e89..d2e243867ac6 100644 --- 
a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev, atomic_set(&rdev->ih.lock, 0); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); + mutex_init(&rdev->gpu_clock_mutex); init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dcea6f01ae4e..d7269f48d37c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -59,9 +59,12 @@ * 2.15.0 - add max_pipes query * 2.16.0 - fix evergreen 2D tiled surface calculation * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx + * 2.18.0 - r600-eg: allow "invalid" DB formats + * 2.19.0 - r600-eg: MSAA textures + * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 17 +#define KMS_DRIVER_MINOR 20 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index b3720054614d..bb3b7fe05ccd 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, return -EINVAL; } - if (bo_va->valid) + if (bo_va->valid && mem) return 0; ngpu_pages = radeon_bo_ngpu_pages(bo); @@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo *bo) { struct radeon_bo_va *bo_va; + int r; bo_va = radeon_bo_va(bo, vm); if (bo_va == NULL) return 0; + /* wait for va use to end */ + while (bo_va->fence) { + r = radeon_fence_wait(bo_va->fence, false); + if (r) { + DRM_ERROR("error while waiting for fence: %d\n", r); + } + if (r == -EDEADLK) { + r = radeon_gpu_reset(rdev); + if (!r) + continue; + } + break; + } + radeon_fence_unref(&bo_va->fence); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); radeon_vm_bo_update_pte(rdev, vm, bo, NULL); @@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) } /** - * radeon_vm_init - tear down a vm instance + * radeon_vm_fini - tear down a vm instance * * @rdev: radeon_device pointer * @vm: requested vm @@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_unbind_locked(rdev, vm); mutex_unlock(&rdev->vm_manager.lock); - /* remove all bo */ + /* remove all bo at this point non are busy any more because unbind + * waited for the last vm fence to signal + */ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); list_del_init(&bo_va->bo_list); list_del_init(&bo_va->vm_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); kfree(bo_va); } @@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_reserve(bo_va->bo, false); if (!r) { list_del_init(&bo_va->bo_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(bo_va->bo); kfree(bo_va); } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 84d045245739..1b57b0058ad6 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_device *rdev = rbo->rdev; struct 
radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_vm *vm = &fpriv->vm; - struct radeon_bo_va *bo_va, *tmp; if (rdev->family < CHIP_CAYMAN) { return; } if (radeon_bo_reserve(rbo, false)) { + dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); return; } - list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) { - if (bo_va->vm == vm) { - /* remove from this vm address space */ - mutex_lock(&vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); - } - } + radeon_vm_bo_rmv(rdev, vm, rbo); radeon_bo_unreserve(rbo); } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 1d73f16b5d97..414b4acf6947 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -29,6 +29,7 @@ #include "drm_sarea.h" #include "radeon.h" #include "radeon_drm.h" +#include "radeon_asic.h" #include <linux/vga_switcheroo.h> #include <linux/slab.h> @@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev, int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; - struct drm_radeon_info *info; + struct drm_radeon_info *info = data; struct radeon_mode_info *minfo = &rdev->mode_info; - uint32_t *value_ptr; - uint32_t value; + uint32_t value, *value_ptr; + uint64_t value64, *value_ptr64; struct drm_crtc *crtc; int i, found; - info = data; + /* TIMESTAMP is a 64-bit value, needs special handling. */ + if (info->request == RADEON_INFO_TIMESTAMP) { + if (rdev->family >= CHIP_R600) { + value_ptr64 = (uint64_t*)((unsigned long)info->value); + if (rdev->family >= CHIP_TAHITI) { + value64 = si_get_gpu_clock(rdev); + } else { + value64 = r600_get_gpu_clock(rdev); + } + + if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) { + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); + return -EFAULT; + } + return 0; + } else { + DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); + return -EINVAL; + } + } + value_ptr = (uint32_t *)((unsigned long)info->value); - if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) + if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) { + DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; + } switch (info->request) { case RADEON_INFO_DEVICE_ID: @@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { - DRM_ERROR("copy_to_user\n"); + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); return -EFAULT; } return 0; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index d5fd615897ec..94b4a1c12893 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; + radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
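[Editor's note, not part of the patch] The RADEON_INFO_TIMESTAMP branch added to radeon_info_ioctl() above copies a 64-bit GPU clock snapshot straight to a user-supplied pointer, unlike the ordinary 32-bit info requests. A minimal userspace sketch of that query follows; it assumes the libdrm drmCommandWriteRead() wrapper, the /dev/dri/card0 node, and the RADEON_INFO_TIMESTAMP request name introduced by the 2.20.0 interface bump, and is illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>
#include <radeon_drm.h>

int main(void)
{
	uint64_t clock = 0;
	struct drm_radeon_info info;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_TIMESTAMP;
	info.value = (uintptr_t)&clock;		/* kernel writes 64 bits here */

	/* Fails with -EINVAL on pre-R600 parts, as the ioctl change above documents. */
	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info))) {
		close(fd);
		return 1;
	}

	printf("GPU clock counter: %llu\n", (unsigned long long)clock);
	close(fd);
	return 0;
}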
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } + radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f380d59c5763..d56978949f34 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -275,6 +275,7 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; + bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1f1a4c803c1d..1cb014b571ab 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo) list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { /* remove from all vm address space */ - mutex_lock(&bo_va->vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); + radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); } } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index a12fbcc8ccb6..aa8ef491ef3c 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) { - save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL); - save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL); save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); - save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL); - save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); /* Stop all video */ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); @@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) /* Unlock host access */ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); - WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); - WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); - WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c053f8193771..0139e227e3c7 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev) /* XXX what about 12? 
*/ rdev->config.si.tile_config |= (3 << 0); break; - } - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.si.tile_config |= 1 << 4; - else + } + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.si.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.si.tile_config |= 2 << 4; + break; + } rdev->config.si.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.si.tile_config |= @@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev) rdev->bios = NULL; } +/** + * si_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (SI). + * Returns the 64 bit clock counter snapshot. + */ +uint64_t si_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 7869089e8761..ef4815c27b1c 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -698,6 +698,9 @@ #define RLC_UCODE_ADDR 0xC32C #define RLC_UCODE_DATA 0xC330 +#define RLC_GPU_CLOCK_COUNT_LSB 0xC338 +#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340 #define RLC_MC_CNTL 0xC344 #define RLC_UCODE_CNTL 0xC348 diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index 0b5e096d39a6..56e0bf31d425 100644 --- a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig @@ -1,6 +1,7 @@ config DRM_UDL tristate "DisplayLink" depends on DRM && EXPERIMENTAL + depends on USB_ARCH_HAS_HCD select DRM_USB select FB_SYS_FILLRECT select FB_SYS_COPYAREA diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 7bd65bdd15a8..291ecc145585 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, /* need to attach */ attach = dma_buf_attach(dma_buf, dev->dev); if (IS_ERR(attach)) - return ERR_PTR(PTR_ERR(attach)); + return ERR_CAST(attach); sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sg)) { diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 5b3c7d135dc9..e25cf31faab2 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -70,27 +70,12 @@ static struct vgasr_priv vgasr_priv = { .clients = LIST_HEAD_INIT(vgasr_priv.clients), }; -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) -{ - mutex_lock(&vgasr_mutex); - if (vgasr_priv.handler) { - mutex_unlock(&vgasr_mutex); - return -EINVAL; - } - - vgasr_priv.handler = handler; - mutex_unlock(&vgasr_mutex); - return 0; -} -EXPORT_SYMBOL(vga_switcheroo_register_handler); - -void vga_switcheroo_unregister_handler(void) +static bool vga_switcheroo_ready(void) { - mutex_lock(&vgasr_mutex); - vgasr_priv.handler = NULL; - mutex_unlock(&vgasr_mutex); + /* we're ready if we get two clients + handler */ + return !vgasr_priv.active && + vgasr_priv.registered_clients == 2 && vgasr_priv.handler; } -EXPORT_SYMBOL(vga_switcheroo_unregister_handler); static void 
vga_switcheroo_enable(void) { @@ -98,7 +83,8 @@ static void vga_switcheroo_enable(void) struct vga_switcheroo_client *client; /* call the handler to init */ - vgasr_priv.handler->init(); + if (vgasr_priv.handler->init) + vgasr_priv.handler->init(); list_for_each_entry(client, &vgasr_priv.clients, list) { if (client->id != -1) @@ -113,6 +99,37 @@ static void vga_switcheroo_enable(void) vgasr_priv.active = true; } +int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) +{ + mutex_lock(&vgasr_mutex); + if (vgasr_priv.handler) { + mutex_unlock(&vgasr_mutex); + return -EINVAL; + } + + vgasr_priv.handler = handler; + if (vga_switcheroo_ready()) { + printk(KERN_INFO "vga_switcheroo: enabled\n"); + vga_switcheroo_enable(); + } + mutex_unlock(&vgasr_mutex); + return 0; +} +EXPORT_SYMBOL(vga_switcheroo_register_handler); + +void vga_switcheroo_unregister_handler(void) +{ + mutex_lock(&vgasr_mutex); + vgasr_priv.handler = NULL; + if (vgasr_priv.active) { + pr_info("vga_switcheroo: disabled\n"); + vga_switcheroo_debugfs_fini(&vgasr_priv); + vgasr_priv.active = false; + } + mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_unregister_handler); + static int register_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, int id, bool active) @@ -134,9 +151,7 @@ static int register_client(struct pci_dev *pdev, if (client_is_vga(client)) vgasr_priv.registered_clients++; - /* if we get two clients + handler */ - if (!vgasr_priv.active && - vgasr_priv.registered_clients == 2 && vgasr_priv.handler) { + if (vga_switcheroo_ready()) { printk(KERN_INFO "vga_switcheroo: enabled\n"); vga_switcheroo_enable(); } diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index faa16f80db9c..0fa356fe82cc 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -196,7 +196,7 @@ struct tjmax { int tjmax; }; -static struct tjmax __cpuinitconst tjmax_table[] = { +static const struct tjmax __cpuinitconst tjmax_table[] = { { "CPU D410", 100000 }, { "CPU D425", 100000 }, { "CPU D510", 100000 }, diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c index ab4825205a9d..5b1a6a666441 100644 --- a/drivers/hwmon/w83627hf.c +++ b/drivers/hwmon/w83627hf.c @@ -1206,7 +1206,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr, int err = -ENODEV; u16 val; - static const __initdata char *names[] = { + static __initconst char *const names[] = { "W83627HF", "W83627THF", "W83697HF", diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f559088869f6..e8726177d103 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -606,8 +606,9 @@ static int __init intel_idle_init(void) intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { + struct cpuidle_driver *drv = cpuidle_get_driver(); printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", - cpuidle_get_driver()->name); + drv ? 
drv->name : "none"); return retval; } diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c index 59fbb3ae40e7..e35bb8f6fe75 100644 --- a/drivers/iio/frequency/adf4350.c +++ b/drivers/iio/frequency/adf4350.c @@ -129,7 +129,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) { struct adf4350_platform_data *pdata = st->pdata; u64 tmp; - u32 div_gcd, prescaler; + u32 div_gcd, prescaler, chspc; u16 mdiv, r_cnt = 0; u8 band_sel_div; @@ -158,14 +158,20 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) if (pdata->ref_div_factor) r_cnt = pdata->ref_div_factor - 1; - do { - r_cnt = adf4350_tune_r_cnt(st, r_cnt); + chspc = st->chspc; - st->r1_mod = st->fpfd / st->chspc; - while (st->r1_mod > ADF4350_MAX_MODULUS) { - r_cnt = adf4350_tune_r_cnt(st, r_cnt); - st->r1_mod = st->fpfd / st->chspc; - } + do { + do { + do { + r_cnt = adf4350_tune_r_cnt(st, r_cnt); + st->r1_mod = st->fpfd / chspc; + if (r_cnt > ADF4350_MAX_R_CNT) { + /* try higher spacing values */ + chspc++; + r_cnt = 0; + } + } while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt); + } while (r_cnt == 0); tmp = freq * (u64)st->r1_mod + (st->fpfd > 1); do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */ @@ -194,7 +200,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) | ADF4350_REG0_FRACT(st->r0_fract); - st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(0) | + st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) | ADF4350_REG1_MOD(st->r1_mod) | prescaler; diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 1cbb449b319a..9a99f43094f0 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c @@ -271,9 +271,10 @@ static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adjd_s311_data *data = iio_priv(indio_dev); - data->buffer = krealloc(data->buffer, indio_dev->scan_bytes, - GFP_KERNEL); - if (!data->buffer) + + kfree(data->buffer); + data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + if (data->buffer == NULL) return -ENOMEM; return 0; diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c index c3e7bac13123..e45712a921ce 100644 --- a/drivers/iio/light/lm3533-als.c +++ b/drivers/iio/light/lm3533-als.c @@ -404,7 +404,7 @@ out: return ret; } -static int show_thresh_either_en(struct device *dev, +static ssize_t show_thresh_either_en(struct device *dev, struct device_attribute *attr, char *buf) { @@ -424,7 +424,7 @@ static int show_thresh_either_en(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%u\n", enable); } -static int store_thresh_either_en(struct device *dev, +static ssize_t store_thresh_either_en(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 6bf850422895..055ed59838dc 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, if (!uevent) return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; + mutex_lock(&ctx->file->mut); uevent->cm_id = cm_id; ucma_set_event_context(ctx, event, uevent); uevent->resp.event = event->event; @@ -277,7 +278,6 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); - mutex_lock(&ctx->file->mut); if (event->event == 
RDMA_CM_EVENT_CONNECT_REQUEST) { if (!ctx->backlog) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index 8c81992fa6db..e4a73158fc7f 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev) /* * Called by c2_probe to initialize the RNIC. This principally - * involves initalizing the various limits and resouce pools that + * involves initializing the various limits and resource pools that * comprise the RNIC instance. */ int __devinit c2_rnic_init(struct c2_dev *c2dev) diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 77b6b182778a..aaf88ef9409c 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) * T3A does 3 things when a TERM is received: * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet * 2) generate an async event on the QP with the TERMINATE opcode - * 3) post a TERMINATE opcde cqe into the associated CQ. + * 3) post a TERMINATE opcode cqe into the associated CQ. * * For (1), we save the message in the qp for later consumer consumption. * For (2), we move the QP into TERMINATE, post a QP event and disconnect. diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c27141fef1ab..9c2ae7efd00f 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) { struct ib_ah *new_ah; struct ib_ah_attr ah_attr; + unsigned long flags; if (!dev->send_agent[port_num - 1][0]) return; @@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) if (IS_ERR(new_ah)) return; - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); if (dev->sm_ah[port_num - 1]) ib_destroy_ah(dev->sm_ah[port_num - 1]); dev->sm_ah[port_num - 1] = new_ah; - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); } /* @@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, static void node_desc_override(struct ib_device *dev, struct ib_mad *mad) { + unsigned long flags; + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { - spin_lock(&to_mdev(dev)->sm_lock); + spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags); memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); - spin_unlock(&to_mdev(dev)->sm_lock); + spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags); } } @@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; int ret; + unsigned long flags; if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, @@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma * wrong following the IB spec strictly, but we know * it's OK for our devices). 
*/ - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); memcpy(send_buf->mad, mad, sizeof *mad); if ((send_buf->ah = dev->sm_ah[port_num - 1])) ret = ib_post_send_mad(send_buf, NULL); else ret = -EINVAL; - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); if (ret) ib_free_send_mad(send_buf); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index fe2088cfa6ee..cc05579ebce7 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { struct mlx4_cmd_mailbox *mailbox; + unsigned long flags; if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; @@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) return 0; - spin_lock(&to_mdev(ibdev)->sm_lock); + spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); memcpy(ibdev->node_desc, props->node_desc, 64); - spin_unlock(&to_mdev(ibdev)->sm_lock); + spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); /* * If possible, pass node desc to FW, so it can generate diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index a6d8ea060ea8..f585eddef4b7 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + struct net_device *ndev; union ib_gid sgid; u16 pkey; int send_size; @@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); /* FIXME: cache smac value? 
*/ - smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr; + ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]; + if (!ndev) + return -ENODEV; + smac = ndev->dev_addr; memcpy(sqp->ud_header.eth.smac_h, smac, 6); if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 5a044526e4f4..c4e0131f1b57 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) ocrdma_get_guid(dev, &sgid->raw[8]); } -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q) static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) { struct net_device *netdev, *tmp; @@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) return 0; } -#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q) +#if IS_ENABLED(CONFIG_IPV6) static int ocrdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; - struct net_device *event_netdev = ifa->idev->dev; - struct net_device *netdev = NULL; + struct net_device *netdev = ifa->idev->dev; struct ib_event gid_event; struct ocrdma_dev *dev; bool found = false; @@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier, bool is_vlan = false; u16 vid = 0; - netdev = vlan_dev_real_dev(event_netdev); - if (netdev != event_netdev) { - is_vlan = true; - vid = vlan_dev_vlan_id(event_netdev); + is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; + if (is_vlan) { + vid = vlan_dev_vlan_id(netdev); + netdev = vlan_dev_real_dev(netdev); } + rcu_read_lock(); list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { if (dev->nic_info.netdev == netdev) { diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 0d7280af99bc..3f6b21e9dc11 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd) dd->piobcnt4k * dd->align4k; dd->piovl15base = ioremap_nocache(vl15off, NUM_VL15_BUFS * dd->align4k); - if (!dd->piovl15base) + if (!dd->piovl15base) { + ret = -ENOMEM; goto bail; + } } qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index a322d5171a2c..50a8a0d4fe67 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, /* Read CTRL reg for each channel to check TRIMDONE */ if (baduns & (1 << chn)) { qib_dev_err(dd, - "Reseting TRIMDONE on chn %d (%s)\n", + "Resetting TRIMDONE on chn %d (%s)\n", chn, where); ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0x10, 0x10); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 95ecf4eadf5f..24683fda8e21 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1271,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(tx->dev); + 
unsigned long flags; if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { + spin_lock_irqsave(&priv->lock, flags); list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->daddr + 4); tx->neigh = NULL; + spin_unlock_irqrestore(&priv->lock, flags); } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 97920b77a5d0..3e2085a3ee47 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1052,7 +1052,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh) for (n = rcu_dereference_protected(*np, lockdep_is_held(&ntbl->rwlock)); n != NULL; - n = rcu_dereference_protected(neigh->hnext, + n = rcu_dereference_protected(*np, lockdep_is_held(&ntbl->rwlock))) { if (n == neigh) { /* found */ diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index bcbf22ee0aa7..1b5b0c730054 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scmnd->sc_data_direction); } -static void srp_remove_req(struct srp_target_port *target, - struct srp_request *req, s32 req_lim_delta) +/** + * srp_claim_req - Take ownership of the scmnd associated with a request. + * @target: SRP target port. + * @req: SRP request. + * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take + * ownership of @req->scmnd if it equals @scmnd. + * + * Return value: + * Either NULL or a pointer to the SCSI command the caller became owner of. + */ +static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, + struct srp_request *req, + struct scsi_cmnd *scmnd) +{ + unsigned long flags; + + spin_lock_irqsave(&target->lock, flags); + if (!scmnd) { + scmnd = req->scmnd; + req->scmnd = NULL; + } else if (req->scmnd == scmnd) { + req->scmnd = NULL; + } else { + scmnd = NULL; + } + spin_unlock_irqrestore(&target->lock, flags); + + return scmnd; +} + +/** + * srp_free_req() - Unmap data and add request to the free request list. 
+ */ +static void srp_free_req(struct srp_target_port *target, + struct srp_request *req, struct scsi_cmnd *scmnd, + s32 req_lim_delta) { unsigned long flags; - srp_unmap_data(req->scmnd, target, req); + srp_unmap_data(scmnd, target, req); + spin_lock_irqsave(&target->lock, flags); target->req_lim += req_lim_delta; - req->scmnd = NULL; list_add_tail(&req->list, &target->free_reqs); spin_unlock_irqrestore(&target->lock, flags); } static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) { - req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); - srp_remove_req(target, req, 0); + struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); + + if (scmnd) { + scmnd->result = DID_RESET << 16; + scmnd->scsi_done(scmnd); + srp_free_req(target, req, scmnd, 0); + } } static int srp_reconnect_target(struct srp_target_port *target) @@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) complete(&target->tsk_mgmt_done); } else { req = &target->req_ring[rsp->tag]; - scmnd = req->scmnd; - if (!scmnd) + scmnd = srp_claim_req(target, req, NULL); + if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, "Null scmnd for RSP w/tag %016llx\n", (unsigned long long) rsp->tag); + + spin_lock_irqsave(&target->lock, flags); + target->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&target->lock, flags); + + return; + } scmnd->result = rsp->status; if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { @@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); - srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); + srp_free_req(target, req, scmnd, + be32_to_cpu(rsp->req_lim_delta)); + scmnd->host_scribble = NULL; scmnd->scsi_done(scmnd); } @@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; - int ret = SUCCESS; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (!req || target->qp_in_error) + if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) return FAILED; - if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, - SRP_TSK_ABORT_TASK)) - return FAILED; - - if (req->scmnd) { - if (!target->tsk_mgmt_status) { - srp_remove_req(target, req, 0); - scmnd->result = DID_ABORT << 16; - } else - ret = FAILED; - } + srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + SRP_TSK_ABORT_TASK); + srp_free_req(target, req, scmnd, 0); + scmnd->result = DID_ABORT << 16; - return ret; + return SUCCESS; } static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 7a0ce8d42887..9e1449f8c6a2 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, * * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping * the data that has been transferred via IB RDMA had to be postponed until the - * check_stop_free() callback. None of this is nessecary anymore and needs to + * check_stop_free() callback. None of this is necessary anymore and needs to * be cleaned up. 
*/ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c index 503c7096ed36..908407efc672 100644 --- a/drivers/input/touchscreen/eeti_ts.c +++ b/drivers/input/touchscreen/eeti_ts.c @@ -48,7 +48,7 @@ struct eeti_ts_priv { struct input_dev *input; struct work_struct work; struct mutex mutex; - int irq, irq_active_high; + int irq_gpio, irq, irq_active_high; }; #define EETI_TS_BITDEPTH (11) @@ -62,7 +62,7 @@ struct eeti_ts_priv { static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv) { - return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high; + return gpio_get_value(priv->irq_gpio) == priv->irq_active_high; } static void eeti_ts_read(struct work_struct *work) @@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev) static int __devinit eeti_ts_probe(struct i2c_client *client, const struct i2c_device_id *idp) { - struct eeti_ts_platform_data *pdata; + struct eeti_ts_platform_data *pdata = client->dev.platform_data; struct eeti_ts_priv *priv; struct input_dev *input; unsigned int irq_flags; @@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, priv->client = client; priv->input = input; - priv->irq = client->irq; + priv->irq_gpio = pdata->irq_gpio; + priv->irq = gpio_to_irq(pdata->irq_gpio); - pdata = client->dev.platform_data; + err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name); + if (err < 0) + goto err1; if (pdata) priv->irq_active_high = pdata->irq_active_high; @@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, err = input_register_device(input); if (err) - goto err1; + goto err2; err = request_irq(priv->irq, eeti_ts_isr, irq_flags, client->name, priv); if (err) { dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); - goto err2; + goto err3; } /* @@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, device_init_wakeup(&client->dev, 0); return 0; -err2: +err3: input_unregister_device(input); input = NULL; /* so we dont try to free it below */ +err2: + gpio_free(pdata->irq_gpio); err1: input_free_device(input); kfree(priv); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 6d1cbdfc9b2a..b64502dfa9f4 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. 
+ */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 500e7f15f5c2..18a89b760aaa 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1111,7 +1111,7 @@ static void print_iommu_info(void) if (iommu->cap & (1 << IOMMU_CAP_EFR)) { pr_info("AMD-Vi: Extended features: "); - for (i = 0; ARRAY_SIZE(feat_str); ++i) { + for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { if (iommu_feature(iommu, (1ULL << i))) pr_cont(" %s", feat_str[i]); } @@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void) break; } - /* Make sure ACS will be enabled */ - pci_request_acs(); - ret = amd_iommu_init_devices(); print_iommu_info(); @@ -1652,6 +1649,9 @@ static bool detect_ivrs(void) early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); + /* Make sure ACS will be enabled during PCI probe */ + pci_request_acs(); + return true; } diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 45350ff5e93c..80bad32aa463 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) spin_lock_init(&priv->pgtablelock); INIT_LIST_HEAD(&priv->clients); - dom->geometry.aperture_start = 0; - dom->geometry.aperture_end = ~0UL; - dom->geometry.force_aperture = true; + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = ~0UL; + domain->geometry.force_aperture = true; domain->priv = priv; return 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 7469b5346643..2297ec193eb4 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) if (!drhd) { printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", pci_name(pdev)); + free_domain_mem(domain); return NULL; } iommu = drhd->iommu; @@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. 
+ */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index e0b18f3ae9a8..af8904de1d44 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -736,6 +736,7 @@ int __init parse_ioapics_under_ir(void) { struct dmar_drhd_unit *drhd; int ir_supported = 0; + int ioapic_idx; for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; @@ -748,13 +749,20 @@ int __init parse_ioapics_under_ir(void) } } - if (ir_supported && ir_ioapic_num != nr_ioapics) { - printk(KERN_WARNING - "Not all IO-APIC's listed under remapping hardware\n"); - return -1; + if (!ir_supported) + return 0; + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { + int ioapic_id = mpc_ioapic_id(ioapic_idx); + if (!map_ioapic_to_ir(ioapic_id)) { + pr_err(FW_BUG "ioapic %d has no mapping iommu, " + "interrupt remapping will be disabled\n", + ioapic_id); + return -1; + } } - return ir_supported; + return 1; } int __init ir_dev_scope_init(void) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 4ba325ab6262..2a4bb36bc688 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain, goto out; } } - dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev)); + dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev)); out: spin_unlock(&as->client_lock); } static int smmu_iommu_domain_init(struct iommu_domain *domain) { - int i, err = -ENODEV; + int i, err = -EAGAIN; unsigned long flags; struct smmu_as *as; struct smmu_device *smmu = smmu_handle; @@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain) /* Look for a free AS with lock held */ for (i = 0; i < smmu->num_as; i++) { as = &smmu->as[i]; - if (!as->pdir_page) { - err = alloc_pdir(as); - if (!err) - goto found; - } + + if (as->pdir_page) + continue; + + err = alloc_pdir(as); + if (!err) + goto found; + if (err != -EAGAIN) break; } diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 5405ec644db3..baf2686aa8eb 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -16,7 +16,6 @@ #include <linux/sched.h> #include "isdnloop.h" -static char *revision = "$Revision: 1.11.6.7 $"; static char *isdnloop_id = "loop0"; MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1) static int __init isdnloop_init(void) { - char *p; - char rev[10]; - - if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); - p = strchr(rev, '$'); - *p = 0; - } else - strcpy(rev, " ??? 
"); - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev); - if (isdnloop_id) return (isdnloop_addcard(isdnloop_id)); diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 0dc8abca1407..949cabb88f1c 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c @@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, InitWin(l2); l2->l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &l2->flag) || - test_bit(FLG_PTP, &l2->flag) || + test_bit(FLG_FIXED_TEI, &l2->flag) || test_bit(FLG_LAPD_NET, &l2->flag)) l2->l2m.state = ST_L2_4; else diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 6157cbbf4113..363975b3c925 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig, struct led_classdev *led_cdev; led_cdev = list_entry(entry, struct led_classdev, trig_list); - led_set_brightness(led_cdev, brightness); + __led_set_brightness(led_cdev, brightness); } read_unlock(&trig->leddev_list_lock); } diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c index 53bd136f1ef0..0ade6ebfc914 100644 --- a/drivers/leds/leds-lp8788.c +++ b/drivers/leds/leds-lp8788.c @@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led, /* scale configuration */ addr = LP8788_ISINK_CTRL; mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET); - val = cfg->scale << cfg->num; + val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET); ret = lp8788_update_bits(led->lp, addr, mask, val); if (ret) return ret; diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c index 9ee12c28059a..771ea067e680 100644 --- a/drivers/leds/leds-renesas-tpu.c +++ b/drivers/leds/leds-renesas-tpu.c @@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev) if (!cfg) { dev_err(&pdev->dev, "missing platform data\n"); - goto err0; + return -ENODEV; } p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 1eee45b69b71..d949b781f6f8 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -268,13 +268,14 @@ config DM_MIRROR needed for live data migration tools such as 'pvmove'. 
config DM_RAID - tristate "RAID 1/4/5/6 target" + tristate "RAID 1/4/5/6/10 target" depends on BLK_DEV_DM select MD_RAID1 + select MD_RAID10 select MD_RAID456 select BLK_DEV_MD ---help--- - A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings + A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings A RAID-5 set of N drives with a capacity of C MB per drive provides the capacity of C * (N - 1) MB, and protects against a failure diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 15dbe03117e4..94e7f6ba2e11 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1305,7 +1305,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->counts.lock); - io_schedule(); + schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f2f29c526544..982e3e390c45 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -11,6 +11,7 @@ #include "md.h" #include "raid1.h" #include "raid5.h" +#include "raid10.h" #include "bitmap.h" #include <linux/device-mapper.h> @@ -52,7 +53,10 @@ struct raid_dev { #define DMPF_MAX_RECOVERY_RATE 0x20 #define DMPF_MAX_WRITE_BEHIND 0x40 #define DMPF_STRIPE_CACHE 0x80 -#define DMPF_REGION_SIZE 0X100 +#define DMPF_REGION_SIZE 0x100 +#define DMPF_RAID10_COPIES 0x200 +#define DMPF_RAID10_FORMAT 0x400 + struct raid_set { struct dm_target *ti; @@ -76,6 +80,7 @@ static struct raid_type { const unsigned algorithm; /* RAID algorithm. */ } raid_types[] = { {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, + {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */}, {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, @@ -86,6 +91,17 @@ static struct raid_type { {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} }; +static unsigned raid10_md_layout_to_copies(int layout) +{ + return layout & 0xFF; +} + +static int raid10_format_to_md_layout(char *format, unsigned copies) +{ + /* 1 "far" copy, and 'copies' "near" copies */ + return (1 << 8) | (copies & 0xFF); +} + static struct raid_type *get_raid_type(char *name) { int i; @@ -339,10 +355,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) * [stripe_cache <sectors>] Stripe cache size for higher RAIDs * [region_size <sectors>] Defines granularity of bitmap + * + * RAID10-only options: + * [raid10_copies <# copies>] Number of copies. (Default: 2) + * [raid10_format <near>] Layout algorithm. (Default: near) */ static int parse_raid_params(struct raid_set *rs, char **argv, unsigned num_raid_params) { + char *raid10_format = "near"; + unsigned raid10_copies = 2; unsigned i, rebuild_cnt = 0; unsigned long value, region_size = 0; sector_t sectors_per_dev = rs->ti->len; @@ -416,11 +438,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } key = argv[i++]; + + /* Parameters that take a string value are checked here. 
*/ + if (!strcasecmp(key, "raid10_format")) { + if (rs->raid_type->level != 10) { + rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; + return -EINVAL; + } + if (strcmp("near", argv[i])) { + rs->ti->error = "Invalid 'raid10_format' value given"; + return -EINVAL; + } + raid10_format = argv[i]; + rs->print_flags |= DMPF_RAID10_FORMAT; + continue; + } + if (strict_strtoul(argv[i], 10, &value) < 0) { rs->ti->error = "Bad numerical argument given in raid params"; return -EINVAL; } + /* Parameters that take a numeric value are checked here */ if (!strcasecmp(key, "rebuild")) { rebuild_cnt++; @@ -439,6 +478,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; } break; + case 10: default: DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); rs->ti->error = "Rebuild not supported for this RAID type"; @@ -495,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv, */ value /= 2; - if (rs->raid_type->level < 5) { + if ((rs->raid_type->level != 5) && + (rs->raid_type->level != 6)) { rs->ti->error = "Inappropriate argument: stripe_cache"; return -EINVAL; } @@ -520,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } else if (!strcasecmp(key, "region_size")) { rs->print_flags |= DMPF_REGION_SIZE; region_size = value; + } else if (!strcasecmp(key, "raid10_copies") && + (rs->raid_type->level == 10)) { + if ((value < 2) || (value > 0xFF)) { + rs->ti->error = "Bad value for 'raid10_copies'"; + return -EINVAL; + } + rs->print_flags |= DMPF_RAID10_COPIES; + raid10_copies = value; } else { DMERR("Unable to parse RAID parameter: %s", key); rs->ti->error = "Unable to parse RAID parameters"; @@ -538,8 +587,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv, if (dm_set_target_max_io_len(rs->ti, max_io_len)) return -EINVAL; - if ((rs->raid_type->level > 1) && - sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) { + if (rs->raid_type->level == 10) { + if (raid10_copies > rs->md.raid_disks) { + rs->ti->error = "Not enough devices to satisfy specification"; + return -EINVAL; + } + + /* (Len * #mirrors) / #devices */ + sectors_per_dev = rs->ti->len * raid10_copies; + sector_div(sectors_per_dev, rs->md.raid_disks); + + rs->md.layout = raid10_format_to_md_layout(raid10_format, + raid10_copies); + rs->md.new_layout = rs->md.layout; + } else if ((rs->raid_type->level > 1) && + sector_div(sectors_per_dev, + (rs->md.raid_disks - rs->raid_type->parity_devs))) { rs->ti->error = "Target length not divisible by number of data devices"; return -EINVAL; } @@ -566,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) if (rs->raid_type->level == 1) return md_raid1_congested(&rs->md, bits); + if (rs->raid_type->level == 10) + return md_raid10_congested(&rs->md, bits); + return md_raid5_congested(&rs->md, bits); } @@ -884,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) case 6: redundancy = rs->raid_type->parity_devs; break; + case 10: + redundancy = raid10_md_layout_to_copies(mddev->layout) - 1; + break; default: ti->error = "Unknown RAID type"; return -EINVAL; @@ -1049,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + if (ti->len != rs->md.array_sectors) { + ti->error = "Array size does not match requested target length"; + ret = -EINVAL; + goto size_mismatch; + } rs->callbacks.congested_fn = raid_is_congested; 
dm_table_add_target_callbacks(ti->table, &rs->callbacks); mddev_suspend(&rs->md); return 0; +size_mismatch: + md_stop(&rs->md); bad: context_free(rs); @@ -1203,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type, DMEMIT(" region_size %lu", rs->md.bitmap_info.chunksize >> 9); + if (rs->print_flags & DMPF_RAID10_COPIES) + DMEMIT(" raid10_copies %u", + raid10_md_layout_to_copies(rs->md.layout)); + + if (rs->print_flags & DMPF_RAID10_FORMAT) + DMEMIT(" raid10_format near"); + DMEMIT(" %d", rs->md.raid_disks); for (i = 0; i < rs->md.raid_disks; i++) { if (rs->dev[i].meta_dev) @@ -1277,7 +1360,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, @@ -1304,6 +1387,8 @@ module_init(dm_raid_init); module_exit(dm_raid_exit); MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target"); +MODULE_ALIAS("dm-raid1"); +MODULE_ALIAS("dm-raid10"); MODULE_ALIAS("dm-raid4"); MODULE_ALIAS("dm-raid5"); MODULE_ALIAS("dm-raid6"); diff --git a/drivers/md/md.c b/drivers/md/md.c index d5ab4493c8be..3f6203a4c7ea 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_flush_request); -/* Support for plugging. - * This mirrors the plugging support in request_queue, but does not - * require having a whole queue or request structures. - * We allocate an md_plug_cb for each md device and each thread it gets - * plugged on. This links tot the private plug_handle structure in the - * personality data where we keep a count of the number of outstanding - * plugs so other code can see if a plug is active. - */ -struct md_plug_cb { - struct blk_plug_cb cb; - struct mddev *mddev; -}; - -static void plugger_unplug(struct blk_plug_cb *cb) +void md_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); - if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) - md_wakeup_thread(mdcb->mddev->thread); - kfree(mdcb); -} - -/* Check that an unplug wakeup will come shortly. 
- * If not, wakeup the md thread immediately - */ -int mddev_check_plugged(struct mddev *mddev) -{ - struct blk_plug *plug = current->plug; - struct md_plug_cb *mdcb; - - if (!plug) - return 0; - - list_for_each_entry(mdcb, &plug->cb_list, cb.list) { - if (mdcb->cb.callback == plugger_unplug && - mdcb->mddev == mddev) { - /* Already on the list, move to top */ - if (mdcb != list_first_entry(&plug->cb_list, - struct md_plug_cb, - cb.list)) - list_move(&mdcb->cb.list, &plug->cb_list); - return 1; - } - } - /* Not currently on the callback list */ - mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); - if (!mdcb) - return 0; - - mdcb->mddev = mddev; - mdcb->cb.callback = plugger_unplug; - atomic_inc(&mddev->plug_cnt); - list_add(&mdcb->cb.list, &plug->cb_list); - return 1; + struct mddev *mddev = cb->data; + md_wakeup_thread(mddev->thread); + kfree(cb); } -EXPORT_SYMBOL_GPL(mddev_check_plugged); +EXPORT_SYMBOL(md_unplug); static inline struct mddev *mddev_get(struct mddev *mddev) { @@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev) atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); - atomic_set(&mddev->plug_cnt, 0); spin_lock_init(&mddev->write_lock); atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); @@ -1157,8 +1108,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor ret = 0; } rdev->sectors = rdev->sb_start; - /* Limit to 4TB as metadata cannot record more than that */ - if (rdev->sectors >= (2ULL << 32)) + /* Limit to 4TB as metadata cannot record more than that. + * (not needed for Linear and RAID0 as metadata doesn't + * record this size) + */ + if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) rdev->sectors = (2ULL << 32) - 2; if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) @@ -1449,7 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) /* Limit to 4TB as metadata cannot record more than that. * 4TB == 2^32 KB, or 2*2^32 sectors. */ - if (num_sectors >= (2ULL << 32)) + if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (2ULL << 32) - 2; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); @@ -3942,17 +3896,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; case clear: /* stopping an active array */ - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; err = do_md_stop(mddev, 0, NULL); break; case inactive: /* stopping an active array */ - if (mddev->pers) { - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; + if (mddev->pers) err = do_md_stop(mddev, 2, NULL); - } else + else err = 0; /* already inactive */ break; case suspended: diff --git a/drivers/md/md.h b/drivers/md/md.h index 7b4a3c318cae..f385b038589d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -266,9 +266,6 @@ struct mddev { int new_chunk_sectors; int reshape_backwards; - atomic_t plug_cnt; /* If device is expecting - * more bios soon. 
- */ struct md_thread *thread; /* management thread */ struct md_thread *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ @@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); -extern int mddev_check_plugged(struct mddev *mddev); extern void md_trim_bio(struct bio *bio, int offset, int size); + +extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); +static inline int mddev_check_plugged(struct mddev *mddev) +{ + return !!blk_check_plugged(md_unplug, mddev, + sizeof(struct blk_plug_cb)); +} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index cacd008d6864..611b5f797618 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -46,6 +46,20 @@ */ #define NR_RAID1_BIOS 256 +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + /* When there are this many requests queue to be written by * the raid1 thread, we become 'congested' to provide back-pressure * for writeback. @@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect const sector_t this_sector = r1_bio->sector; int sectors; int best_good_sectors; - int start_disk; - int best_disk; - int i; + int best_disk, best_dist_disk, best_pending_disk; + int has_nonrot_disk; + int disk; sector_t best_dist; + unsigned int min_pending; struct md_rdev *rdev; int choose_first; + int choose_next_idle; rcu_read_lock(); /* @@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect retry: sectors = r1_bio->sectors; best_disk = -1; + best_dist_disk = -1; best_dist = MaxSector; + best_pending_disk = -1; + min_pending = UINT_MAX; best_good_sectors = 0; + has_nonrot_disk = 0; + choose_next_idle = 0; if (conf->mddev->recovery_cp < MaxSector && - (this_sector + sectors >= conf->next_resync)) { + (this_sector + sectors >= conf->next_resync)) choose_first = 1; - start_disk = 0; - } else { + else choose_first = 0; - start_disk = conf->last_used; - } - for (i = 0 ; i < conf->raid_disks * 2 ; i++) { + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { sector_t dist; sector_t first_bad; int bad_sectors; - - int disk = start_disk + i; - if (disk >= conf->raid_disks * 2) - disk -= conf->raid_disks * 2; + unsigned int pending; + bool nonrot; rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED @@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect } else best_good_sectors = sectors; + nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); + has_nonrot_disk |= nonrot; + pending = atomic_read(&rdev->nr_pending); dist = abs(this_sector - conf->mirrors[disk].head_position); - if (choose_first - /* Don't change to another disk for sequential reads */ - || conf->next_seq_sect == this_sector - || dist == 0 - 
/* If device is idle, use it */ - || atomic_read(&rdev->nr_pending) == 0) { + if (choose_first) { + best_disk = disk; + break; + } + /* Don't change to another disk for sequential reads */ + if (conf->mirrors[disk].next_seq_sect == this_sector + || dist == 0) { + int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; + struct raid1_info *mirror = &conf->mirrors[disk]; + + best_disk = disk; + /* + * If buffered sequential IO size exceeds optimal + * iosize, check if there is idle disk. If yes, choose + * the idle disk. read_balance could already choose an + * idle disk before noticing it's a sequential IO in + * this disk. This doesn't matter because this disk + * will idle, next time it will be utilized after the + * first disk has IO size exceeds optimal iosize. In + * this way, iosize of the first disk will be optimal + * iosize at least. iosize of the second disk might be + * small, but not a big deal since when the second disk + * starts IO, the first disk is likely still busy. + */ + if (nonrot && opt_iosize > 0 && + mirror->seq_start != MaxSector && + mirror->next_seq_sect > opt_iosize && + mirror->next_seq_sect - opt_iosize >= + mirror->seq_start) { + choose_next_idle = 1; + continue; + } + break; + } + /* If device is idle, use it */ + if (pending == 0) { best_disk = disk; break; } + + if (choose_next_idle) + continue; + + if (min_pending > pending) { + min_pending = pending; + best_pending_disk = disk; + } + if (dist < best_dist) { best_dist = dist; - best_disk = disk; + best_dist_disk = disk; } } + /* + * If all disks are rotational, choose the closest disk. If any disk is + * non-rotational, choose the disk with less pending request even the + * disk is rotational, which might/might not be optimal for raids with + * mixed ratation/non-rotational disks depending on workload. + */ + if (best_disk == -1) { + if (has_nonrot_disk) + best_disk = best_pending_disk; + else + best_disk = best_dist_disk; + } + if (best_disk >= 0) { rdev = rcu_dereference(conf->mirrors[best_disk].rdev); if (!rdev) @@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect goto retry; } sectors = best_good_sectors; - conf->next_seq_sect = this_sector + sectors; - conf->last_used = best_disk; + + if (conf->mirrors[best_disk].next_seq_sect != this_sector) + conf->mirrors[best_disk].seq_start = this_sector; + + conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; } rcu_read_unlock(); *max_sectors = sectors; @@ -870,10 +944,48 @@ do_sync_io: pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); } +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + int pending_cnt; +}; + +static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) +{ + struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, + cb); + struct mddev *mddev = plug->cb.data; + struct r1conf *conf = mddev->private; + struct bio *bio; + + if (from_schedule) { + spin_lock_irq(&conf->device_lock); + bio_list_merge(&conf->pending_bio_list, &plug->pending); + conf->pending_count += plug->pending_cnt; + spin_unlock_irq(&conf->device_lock); + md_wakeup_thread(mddev->thread); + kfree(plug); + return; + } + + /* we aren't scheduling, so we can do the write-out directly. 
*/ + bio = bio_list_get(&plug->pending); + bitmap_unplug(mddev->bitmap); + wake_up(&conf->wait_barrier); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + kfree(plug); +} + static void make_request(struct mddev *mddev, struct bio * bio) { struct r1conf *conf = mddev->private; - struct mirror_info *mirror; + struct raid1_info *mirror; struct r1bio *r1_bio; struct bio *read_bio; int i, disks; @@ -883,6 +995,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; + struct blk_plug_cb *cb; + struct raid1_plug_cb *plug = NULL; int first_clone; int sectors_handled; int max_sectors; @@ -1185,11 +1299,22 @@ read_again: mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); + + cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid1_plug_cb, cb); + else + plug = NULL; spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) + if (!plug) md_wakeup_thread(mddev->thread); } /* Mustn't call r1_bio_write_done before this next test, @@ -1364,7 +1489,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = -EEXIST; int mirror = 0; - struct mirror_info *p; + struct raid1_info *p; int first = 0; int last = conf->raid_disks - 1; struct request_queue *q = bdev_get_queue(rdev->bdev); @@ -1433,7 +1558,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; - struct mirror_info *p = conf->mirrors+ number; + struct raid1_info *p = conf->mirrors + number; if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; @@ -2173,8 +2298,7 @@ static void raid1d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2371,6 +2495,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bio->bi_rw = READ; bio->bi_end_io = end_sync_read; read_targets++; + } else if (!test_bit(WriteErrorSeen, &rdev->flags) && + test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && + !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + /* + * The device is suitable for reading (InSync), + * but has bad block(s) here. Let's try to correct them, + * if we are doing resync or repair. Otherwise, leave + * this device alone for this sync request. 
+ */ + bio->bi_rw = WRITE; + bio->bi_end_io = end_sync_write; + write_targets++; } } if (bio->bi_end_io) { @@ -2428,7 +2564,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp /* There is nowhere to write, so all non-sync * drives must be failed - so we are finished */ - sector_t rv = max_sector - sector_nr; + sector_t rv; + if (min_bad > 0) + max_sector = sector_nr + min_bad; + rv = max_sector - sector_nr; *skipped = 1; put_buf(r1_bio); return rv; @@ -2521,7 +2660,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) { struct r1conf *conf; int i; - struct mirror_info *disk; + struct raid1_info *disk; struct md_rdev *rdev; int err = -ENOMEM; @@ -2529,7 +2668,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!conf) goto abort; - conf->mirrors = kzalloc(sizeof(struct mirror_info) + conf->mirrors = kzalloc(sizeof(struct raid1_info) * mddev->raid_disks * 2, GFP_KERNEL); if (!conf->mirrors) @@ -2572,6 +2711,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) mddev->merge_check_needed = 1; disk->head_position = 0; + disk->seq_start = MaxSector; } conf->raid_disks = mddev->raid_disks; conf->mddev = mddev; @@ -2585,7 +2725,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->recovery_disabled = mddev->recovery_disabled - 1; err = -EIO; - conf->last_used = -1; for (i = 0; i < conf->raid_disks * 2; i++) { disk = conf->mirrors + i; @@ -2611,19 +2750,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev && (disk->rdev->saved_raid_disk < 0)) conf->fullsync = 1; - } else if (conf->last_used < 0) - /* - * The first working device is used as a - * starting point to read balancing. - */ - conf->last_used = i; + } } - if (conf->last_used < 0) { - printk(KERN_ERR "md/raid1:%s: no operational mirrors\n", - mdname(mddev)); - goto abort; - } err = -ENOMEM; conf->thread = md_register_thread(raid1d, mddev, "raid1"); if (!conf->thread) { @@ -2798,7 +2927,7 @@ static int raid1_reshape(struct mddev *mddev) */ mempool_t *newpool, *oldpool; struct pool_info *newpoolinfo; - struct mirror_info *newmirrors; + struct raid1_info *newmirrors; struct r1conf *conf = mddev->private; int cnt, raid_disks; unsigned long flags; @@ -2841,7 +2970,7 @@ static int raid1_reshape(struct mddev *mddev) kfree(newpoolinfo); return -ENOMEM; } - newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2, + newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); @@ -2880,7 +3009,6 @@ static int raid1_reshape(struct mddev *mddev) conf->raid_disks = mddev->raid_disks = raid_disks; mddev->delta_disks = 0; - conf->last_used = 0; /* just make sure it is in-range */ lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 80ded139314c..0ff3715fb7eb 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -1,9 +1,15 @@ #ifndef _RAID1_H #define _RAID1_H -struct mirror_info { +struct raid1_info { struct md_rdev *rdev; sector_t head_position; + + /* When choose the best device for a read (read_balance()) + * we try to keep sequential reads one the same device + */ + sector_t next_seq_sect; + sector_t seq_start; }; /* @@ -24,17 +30,11 @@ struct pool_info { struct r1conf { struct mddev *mddev; - struct mirror_info *mirrors; /* twice 'raid_disks' to + struct raid1_info *mirrors; /* twice 'raid_disks' to * allow for replacements. 
*/ int raid_disks; - /* When choose the best device for a read (read_balance()) - * we try to keep sequential reads one the same device - * using 'last_used' and 'next_seq_sect' - */ - int last_used; - sector_t next_seq_sect; /* During resync, read_balancing is only allowed on the part * of the array that has been resynced. 'next_resync' tells us * where that is. @@ -135,20 +135,6 @@ struct r1bio { /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio *)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting bios[n] to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r1bio.state */ #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8da6282254c3..1c2eb38f3c51 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -60,7 +60,21 @@ */ #define NR_RAID10_BIOS 256 -/* When there are this many requests queue to be written by +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + +/* When there are this many requests queued to be written by * the raid10 thread, we become 'congested' to provide back-pressure * for writeback. 
*/ @@ -645,7 +659,11 @@ static int raid10_mergeable_bvec(struct request_queue *q, max = biovec->bv_len; if (mddev->merge_check_needed) { - struct r10bio r10_bio; + struct { + struct r10bio r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10_bio = &on_stack.r10_bio; int s; if (conf->reshape_progress != MaxSector) { /* Cannot give any guidance during reshape */ @@ -653,18 +671,18 @@ static int raid10_mergeable_bvec(struct request_queue *q, return biovec->bv_len; return 0; } - r10_bio.sector = sector; - raid10_find_phys(conf, &r10_bio); + r10_bio->sector = sector; + raid10_find_phys(conf, r10_bio); rcu_read_lock(); for (s = 0; s < conf->copies; s++) { - int disk = r10_bio.devs[s].devnum; + int disk = r10_bio->devs[s].devnum; struct md_rdev *rdev = rcu_dereference( conf->mirrors[disk].rdev); if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -676,7 +694,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -717,7 +735,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, int sectors = r10_bio->sectors; int best_good_sectors; sector_t new_distance, best_dist; - struct md_rdev *rdev, *best_rdev; + struct md_rdev *best_rdev, *rdev = NULL; int do_balance; int best_slot; struct geom *geo = &conf->geo; @@ -839,9 +857,8 @@ retry: return rdev; } -static int raid10_congested(void *data, int bits) +int md_raid10_congested(struct mddev *mddev, int bits) { - struct mddev *mddev = data; struct r10conf *conf = mddev->private; int i, ret = 0; @@ -849,8 +866,6 @@ static int raid10_congested(void *data, int bits) conf->pending_count >= max_queued_requests) return 1; - if (mddev_congested(mddev, bits)) - return 1; rcu_read_lock(); for (i = 0; (i < conf->geo.raid_disks || i < conf->prev.raid_disks) @@ -866,6 +881,15 @@ static int raid10_congested(void *data, int bits) rcu_read_unlock(); return ret; } +EXPORT_SYMBOL_GPL(md_raid10_congested); + +static int raid10_congested(void *data, int bits) +{ + struct mddev *mddev = data; + + return mddev_congested(mddev, bits) || + md_raid10_congested(mddev, bits); +} static void flush_pending_writes(struct r10conf *conf) { @@ -1546,7 +1570,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) static void print_conf(struct r10conf *conf) { int i; - struct mirror_info *tmp; + struct raid10_info *tmp; printk(KERN_DEBUG "RAID10 conf printout:\n"); if (!conf) { @@ -1580,7 +1604,7 @@ static int raid10_spare_active(struct mddev *mddev) { int i; struct r10conf *conf = mddev->private; - struct mirror_info *tmp; + struct raid10_info *tmp; int count = 0; unsigned long flags; @@ -1655,7 +1679,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) else mirror = first; for ( ; mirror <= last ; mirror++) { - struct mirror_info *p = &conf->mirrors[mirror]; + struct raid10_info *p = &conf->mirrors[mirror]; if (p->recovery_disabled == mddev->recovery_disabled) continue; if (p->rdev) { @@ -1709,7 +1733,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int err = 0; int number = rdev->raid_disk; struct md_rdev 
**rdevp; - struct mirror_info *p = conf->mirrors + number; + struct raid10_info *p = conf->mirrors + number; print_conf(conf); if (rdev == p->rdev) @@ -2660,8 +2684,7 @@ static void raid10d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2876,7 +2899,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sect; int must_sync; int any_working; - struct mirror_info *mirror = &conf->mirrors[i]; + struct raid10_info *mirror = &conf->mirrors[i]; if ((mirror->rdev == NULL || test_bit(In_sync, &mirror->rdev->flags)) @@ -3388,7 +3411,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) goto out; /* FIXME calc properly */ - conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks + + conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + max(0,mddev->delta_disks)), GFP_KERNEL); if (!conf->mirrors) @@ -3452,7 +3475,7 @@ static int run(struct mddev *mddev) { struct r10conf *conf; int i, disk_idx, chunk_size; - struct mirror_info *disk; + struct raid10_info *disk; struct md_rdev *rdev; sector_t size; sector_t min_offset_diff = 0; @@ -3472,12 +3495,14 @@ static int run(struct mddev *mddev) conf->thread = NULL; chunk_size = mddev->chunk_sectors << 9; - blk_queue_io_min(mddev->queue, chunk_size); - if (conf->geo.raid_disks % conf->geo.near_copies) - blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); - else - blk_queue_io_opt(mddev->queue, chunk_size * - (conf->geo.raid_disks / conf->geo.near_copies)); + if (mddev->queue) { + blk_queue_io_min(mddev->queue, chunk_size); + if (conf->geo.raid_disks % conf->geo.near_copies) + blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); + else + blk_queue_io_opt(mddev->queue, chunk_size * + (conf->geo.raid_disks / conf->geo.near_copies)); + } rdev_for_each(rdev, mddev) { long long diff; @@ -3511,8 +3536,9 @@ static int run(struct mddev *mddev) if (first || diff < min_offset_diff) min_offset_diff = diff; - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + if (mddev->gendisk) + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); disk->head_position = 0; } @@ -3575,22 +3601,22 @@ static int run(struct mddev *mddev) md_set_array_sectors(mddev, size); mddev->resync_max_sectors = size; - mddev->queue->backing_dev_info.congested_fn = raid10_congested; - mddev->queue->backing_dev_info.congested_data = mddev; - - /* Calculate max read-ahead size. - * We need to readahead at least twice a whole stripe.... - * maybe... - */ - { + if (mddev->queue) { int stripe = conf->geo.raid_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); + mddev->queue->backing_dev_info.congested_fn = raid10_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + /* Calculate max read-ahead size. + * We need to readahead at least twice a whole stripe.... + * maybe... 
+ */ stripe /= conf->geo.near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); } - blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); if (md_integrity_register(mddev)) goto out_free_conf; @@ -3641,7 +3667,10 @@ static int stop(struct mddev *mddev) lower_barrier(conf); md_unregister_thread(&mddev->thread); - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + if (mddev->queue) + /* the unplug fn references 'conf'*/ + blk_sync_queue(mddev->queue); + if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); @@ -3805,7 +3834,7 @@ static int raid10_check_reshape(struct mddev *mddev) if (mddev->delta_disks > 0) { /* allocate new 'mirrors' list */ conf->mirrors_new = kzalloc( - sizeof(struct mirror_info) + sizeof(struct raid10_info) *(mddev->raid_disks + mddev->delta_disks), GFP_KERNEL); @@ -3930,7 +3959,7 @@ static int raid10_start_reshape(struct mddev *mddev) spin_lock_irq(&conf->device_lock); if (conf->mirrors_new) { memcpy(conf->mirrors_new, conf->mirrors, - sizeof(struct mirror_info)*conf->prev.raid_disks); + sizeof(struct raid10_info)*conf->prev.raid_disks); smp_mb(); kfree(conf->mirrors_old); /* FIXME and elsewhere */ conf->mirrors_old = conf->mirrors; @@ -4389,14 +4418,18 @@ static int handle_reshape_read_error(struct mddev *mddev, { /* Use sync reads to get the blocks from somewhere else */ int sectors = r10_bio->sectors; - struct r10bio r10b; struct r10conf *conf = mddev->private; + struct { + struct r10bio r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10b = &on_stack.r10_bio; int slot = 0; int idx = 0; struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; - r10b.sector = r10_bio->sector; - __raid10_find_phys(&conf->prev, &r10b); + r10b->sector = r10_bio->sector; + __raid10_find_phys(&conf->prev, r10b); while (sectors) { int s = sectors; @@ -4407,7 +4440,7 @@ static int handle_reshape_read_error(struct mddev *mddev, s = PAGE_SIZE >> 9; while (!success) { - int d = r10b.devs[slot].devnum; + int d = r10b->devs[slot].devnum; struct md_rdev *rdev = conf->mirrors[d].rdev; sector_t addr; if (rdev == NULL || @@ -4415,7 +4448,7 @@ static int handle_reshape_read_error(struct mddev *mddev, !test_bit(In_sync, &rdev->flags)) goto failed; - addr = r10b.devs[slot].addr + idx * PAGE_SIZE; + addr = r10b->devs[slot].addr + idx * PAGE_SIZE; success = sync_page_io(rdev, addr, s << 9, diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 135b1b0a1554..1054cf602345 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -1,7 +1,7 @@ #ifndef _RAID10_H #define _RAID10_H -struct mirror_info { +struct raid10_info { struct md_rdev *rdev, *replacement; sector_t head_position; int recovery_disabled; /* matches @@ -13,8 +13,8 @@ struct mirror_info { struct r10conf { struct mddev *mddev; - struct mirror_info *mirrors; - struct mirror_info *mirrors_new, *mirrors_old; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new, *mirrors_old; spinlock_t device_lock; /* geometry */ @@ -110,7 +110,7 @@ struct r10bio { * We choose the number when they are allocated. * We sometimes need an extra bio to write to the replacement. 
*/ - struct { + struct r10dev { struct bio *bio; union { struct bio *repl_bio; /* used for resync and @@ -123,20 +123,6 @@ struct r10bio { } devs[0]; }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting devs[n].bio to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r10bio.state */ enum r10bio_state { R10BIO_Uptodate, @@ -159,4 +145,7 @@ enum r10bio_state { */ R10BIO_Previous, }; + +extern int md_raid10_congested(struct mddev *mddev, int bits); + #endif diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 04348d76bb30..adda94df5eb2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) * We maintain a biased count of active stripes in the bottom 16 bits of * bi_phys_segments, and a count of processed stripes in the upper 16 bits */ -static inline int raid5_bi_phys_segments(struct bio *bio) +static inline int raid5_bi_processed_stripes(struct bio *bio) { - return bio->bi_phys_segments & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return (atomic_read(segments) >> 16) & 0xffff; } -static inline int raid5_bi_hw_segments(struct bio *bio) +static inline int raid5_dec_bi_active_stripes(struct bio *bio) { - return (bio->bi_phys_segments >> 16) & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return atomic_sub_return(1, segments) & 0xffff; } -static inline int raid5_dec_bi_phys_segments(struct bio *bio) +static inline void raid5_inc_bi_active_stripes(struct bio *bio) { - --bio->bi_phys_segments; - return raid5_bi_phys_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_inc(segments); } -static inline int raid5_dec_bi_hw_segments(struct bio *bio) +static inline void raid5_set_bi_processed_stripes(struct bio *bio, + unsigned int cnt) { - unsigned short val = raid5_bi_hw_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + int old, new; - --val; - bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); - return val; + do { + old = atomic_read(segments); + new = (old & 0xffff) | (cnt << 16); + } while (atomic_cmpxchg(segments, old, new) != old); } -static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) +static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) { - bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_set(segments, cnt); } /* Find first data disk in a raid6 stripe */ @@ -190,49 +196,56 @@ static int stripe_operations_active(struct stripe_head *sh) test_bit(STRIPE_COMPUTE_RUN, &sh->state); } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) { - if (atomic_dec_and_test(&sh->count)) { - BUG_ON(!list_empty(&sh->lru)); - BUG_ON(atomic_read(&conf->active_stripes)==0); - if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, 
&sh->state) && - !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - list_add_tail(&sh->lru, &conf->delayed_list); - else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && - sh->bm_seq - conf->seq_write > 0) - list_add_tail(&sh->lru, &conf->bitmap_list); - else { - clear_bit(STRIPE_DELAYED, &sh->state); - clear_bit(STRIPE_BIT_DELAY, &sh->state); - list_add_tail(&sh->lru, &conf->handle_list); - } - md_wakeup_thread(conf->mddev->thread); - } else { - BUG_ON(stripe_operations_active(sh)); - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - if (atomic_dec_return(&conf->preread_active_stripes) - < IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + BUG_ON(!list_empty(&sh->lru)); + BUG_ON(atomic_read(&conf->active_stripes)==0); + if (test_bit(STRIPE_HANDLE, &sh->state)) { + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + list_add_tail(&sh->lru, &conf->delayed_list); + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) + list_add_tail(&sh->lru, &conf->bitmap_list); + else { + clear_bit(STRIPE_DELAYED, &sh->state); + clear_bit(STRIPE_BIT_DELAY, &sh->state); + list_add_tail(&sh->lru, &conf->handle_list); + } + md_wakeup_thread(conf->mddev->thread); + } else { + BUG_ON(stripe_operations_active(sh)); + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + if (atomic_dec_return(&conf->preread_active_stripes) + < IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + atomic_dec(&conf->active_stripes); + if (!test_bit(STRIPE_EXPANDING, &sh->state)) { + list_add_tail(&sh->lru, &conf->inactive_list); + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); } } } +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +{ + if (atomic_dec_and_test(&sh->count)) + do_release_stripe(conf, sh); +} + static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - __release_stripe(conf, sh); - spin_unlock_irqrestore(&conf->device_lock, flags); + local_irq_save(flags); + if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { + do_release_stripe(conf, sh); + spin_unlock(&conf->device_lock); + } + local_irq_restore(flags); } static inline void remove_hash(struct stripe_head *sh) @@ -471,7 +484,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector, } else { if (atomic_read(&sh->count)) { BUG_ON(!list_empty(&sh->lru) - && !test_bit(STRIPE_EXPANDING, &sh->state)); + && !test_bit(STRIPE_EXPANDING, &sh->state) + && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)); } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); @@ -640,6 +654,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) else bi->bi_sector = (sh->sector + rdev->data_offset); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + bi->bi_rw |= REQ_FLUSH; + bi->bi_flags = 1 << BIO_UPTODATE; bi->bi_idx = 0; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -749,14 +766,12 @@ static void ops_complete_biofill(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; struct bio *return_bi = NULL; - struct r5conf *conf = sh->raid_conf; int i; pr_debug("%s: stripe %llu\n", __func__, (unsigned long 
long)sh->sector); /* clear completed biofills */ - spin_lock_irq(&conf->device_lock); for (i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -774,7 +789,7 @@ static void ops_complete_biofill(void *stripe_head_ref) while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { rbi2 = r5_next_bio(rbi, dev->sector); - if (!raid5_dec_bi_phys_segments(rbi)) { + if (!raid5_dec_bi_active_stripes(rbi)) { rbi->bi_next = return_bi; return_bi = rbi; } @@ -782,7 +797,6 @@ static void ops_complete_biofill(void *stripe_head_ref) } } } - spin_unlock_irq(&conf->device_lock); clear_bit(STRIPE_BIOFILL_RUN, &sh->state); return_io(return_bi); @@ -794,7 +808,6 @@ static void ops_complete_biofill(void *stripe_head_ref) static void ops_run_biofill(struct stripe_head *sh) { struct dma_async_tx_descriptor *tx = NULL; - struct r5conf *conf = sh->raid_conf; struct async_submit_ctl submit; int i; @@ -805,10 +818,10 @@ static void ops_run_biofill(struct stripe_head *sh) struct r5dev *dev = &sh->dev[i]; if (test_bit(R5_Wantfill, &dev->flags)) { struct bio *rbi; - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); dev->read = rbi = dev->toread; dev->toread = NULL; - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, dev->page, @@ -1144,12 +1157,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { struct bio *wbi; - spin_lock_irq(&sh->raid_conf->device_lock); + spin_lock_irq(&sh->stripe_lock); chosen = dev->towrite; dev->towrite = NULL; BUG_ON(dev->written); wbi = dev->written = chosen; - spin_unlock_irq(&sh->raid_conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { @@ -1454,6 +1467,8 @@ static int grow_one_stripe(struct r5conf *conf) init_waitqueue_head(&sh->ops.wait_for_ops); #endif + spin_lock_init(&sh->stripe_lock); + if (grow_buffers(sh)) { shrink_buffers(sh); kmem_cache_free(conf->slab_cache, sh); @@ -1739,7 +1754,9 @@ static void raid5_end_read_request(struct bio * bi, int error) atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); - } + } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + if (atomic_read(&rdev->read_errors)) atomic_set(&rdev->read_errors, 0); } else { @@ -1784,7 +1801,11 @@ static void raid5_end_read_request(struct bio * bi, int error) else retry = 1; if (retry) - set_bit(R5_ReadError, &sh->dev[i].flags); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + set_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + } else + set_bit(R5_ReadNoMerge, &sh->dev[i].flags); else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); @@ -2340,11 +2361,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector); - - spin_lock_irq(&conf->device_lock); + /* + * If several bio share a stripe. The bio bi_phys_segments acts as a + * reference count to avoid race. The reference count should already be + * increased before this function is called (for example, in + * make_request()), so other bio sharing this stripe will not free the + * stripe. If a stripe is owned by one stripe, the stripe lock will + * protect it. 
+ */ + spin_lock_irq(&sh->stripe_lock); if (forwrite) { bip = &sh->dev[dd_idx].towrite; - if (*bip == NULL && sh->dev[dd_idx].written == NULL) + if (*bip == NULL) firstwrite = 1; } else bip = &sh->dev[dd_idx].toread; @@ -2360,7 +2388,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (*bip) bi->bi_next = *bip; *bip = bi; - bi->bi_phys_segments++; + raid5_inc_bi_active_stripes(bi); if (forwrite) { /* check if page is covered */ @@ -2375,7 +2403,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_sector, @@ -2391,7 +2419,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in overlap: set_bit(R5_Overlap, &sh->dev[dd_idx].flags); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); return 0; } @@ -2441,10 +2469,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, rdev_dec_pending(rdev, conf->mddev); } } - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); /* fail all writes first */ bi = sh->dev[i].towrite; sh->dev[i].towrite = NULL; + spin_unlock_irq(&sh->stripe_lock); if (bi) { s->to_write--; bitmap_end = 1; @@ -2457,13 +2486,17 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } + if (bitmap_end) + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0, 0); + bitmap_end = 0; /* and fail all 'written' */ bi = sh->dev[i].written; sh->dev[i].written = NULL; @@ -2472,7 +2505,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; @@ -2496,14 +2529,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } } - spin_unlock_irq(&conf->device_lock); if (bitmap_end) bitmap_endwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0, 0); @@ -2707,30 +2739,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, test_bit(R5_UPTODATE, &dev->flags)) { /* We can return any write requests */ struct bio *wbi, *wbi2; - int bitmap_end = 0; pr_debug("Return write for disc %d\n", i); - spin_lock_irq(&conf->device_lock); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { wbi2 = r5_next_bio(wbi, dev->sector); - if (!raid5_dec_bi_phys_segments(wbi)) { + if (!raid5_dec_bi_active_stripes(wbi)) { md_write_end(conf->mddev); wbi->bi_next = *return_bi; *return_bi = wbi; } wbi = wbi2; } - if (dev->towrite == NULL) - bitmap_end = 1; - 
spin_unlock_irq(&conf->device_lock); - if (bitmap_end) - bitmap_endwrite(conf->mddev->bitmap, - sh->sector, - STRIPE_SECTORS, + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), - 0); + 0); } } @@ -3182,7 +3207,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) /* Now to look around and see what can be done */ rcu_read_lock(); - spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { struct md_rdev *rdev; sector_t first_bad; @@ -3328,7 +3352,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) do_recovery = 1; } } - spin_unlock_irq(&conf->device_lock); if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, * we must be recovering. @@ -3791,7 +3814,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) * this sets the active strip count to 1 and the processed * strip count to zero (upper 8 bits) */ - bi->bi_phys_segments = 1; /* biased count of active stripes */ + raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ } return bi; @@ -3988,6 +4011,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf) return sh; } +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; +}; + +static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) +{ + struct raid5_plug_cb *cb = container_of( + blk_cb, struct raid5_plug_cb, cb); + struct stripe_head *sh; + struct mddev *mddev = cb->cb.data; + struct r5conf *conf = mddev->private; + + if (cb->list.next && !list_empty(&cb->list)) { + spin_lock_irq(&conf->device_lock); + while (!list_empty(&cb->list)) { + sh = list_first_entry(&cb->list, struct stripe_head, lru); + list_del_init(&sh->lru); + /* + * avoid race release_stripe_plug() sees + * STRIPE_ON_UNPLUG_LIST clear but the stripe + * is still in our list + */ + smp_mb__before_clear_bit(); + clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); + __release_stripe(conf, sh); + } + spin_unlock_irq(&conf->device_lock); + } + kfree(cb); +} + +static void release_stripe_plug(struct mddev *mddev, + struct stripe_head *sh) +{ + struct blk_plug_cb *blk_cb = blk_check_plugged( + raid5_unplug, mddev, + sizeof(struct raid5_plug_cb)); + struct raid5_plug_cb *cb; + + if (!blk_cb) { + release_stripe(sh); + return; + } + + cb = container_of(blk_cb, struct raid5_plug_cb, cb); + + if (cb->list.next == NULL) + INIT_LIST_HEAD(&cb->list); + + if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) + list_add_tail(&sh->lru, &cb->list); + else + release_stripe(sh); +} + static void make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; @@ -4113,11 +4192,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); - if ((bi->bi_rw & REQ_SYNC) && + if ((bi->bi_rw & REQ_NOIDLE) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); - mddev_check_plugged(mddev); - release_stripe(sh); + release_stripe_plug(mddev, sh); } else { /* cannot get stripe for read-ahead, just give-up */ clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -4126,9 +4204,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) } } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(bi); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(bi); if (remaining == 0) { if ( rw == 
WRITE ) @@ -4484,7 +4560,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) sector += STRIPE_SECTORS, scnt++) { - if (scnt < raid5_bi_hw_segments(raid_bio)) + if (scnt < raid5_bi_processed_stripes(raid_bio)) /* already done this stripe */ continue; @@ -4492,25 +4568,24 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) if (!sh) { /* failed to get a stripe - must wait */ - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { release_stripe(sh); - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } + set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); handle_stripe(sh); release_stripe(sh); handled++; } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(raid_bio); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(raid_bio); if (remaining == 0) bio_endio(raid_bio, 0); if (atomic_dec_and_test(&conf->active_aligned_reads)) @@ -4518,6 +4593,30 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) return handled; } +#define MAX_STRIPE_BATCH 8 +static int handle_active_stripes(struct r5conf *conf) +{ + struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; + int i, batch_size = 0; + + while (batch_size < MAX_STRIPE_BATCH && + (sh = __get_priority_stripe(conf)) != NULL) + batch[batch_size++] = sh; + + if (batch_size == 0) + return batch_size; + spin_unlock_irq(&conf->device_lock); + + for (i = 0; i < batch_size; i++) + handle_stripe(batch[i]); + + cond_resched(); + + spin_lock_irq(&conf->device_lock); + for (i = 0; i < batch_size; i++) + __release_stripe(conf, batch[i]); + return batch_size; +} /* * This is our raid5 kernel thread. 
@@ -4528,7 +4627,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) */ static void raid5d(struct mddev *mddev) { - struct stripe_head *sh; struct r5conf *conf = mddev->private; int handled; struct blk_plug plug; @@ -4542,8 +4640,9 @@ static void raid5d(struct mddev *mddev) spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; + int batch_size; - if (atomic_read(&mddev->plug_cnt) == 0 && + if ( !list_empty(&conf->bitmap_list)) { /* Now is a good time to flush some bitmap updates */ conf->seq_flush++; @@ -4553,8 +4652,7 @@ static void raid5d(struct mddev *mddev) conf->seq_write = conf->seq_flush; activate_bit_delay(conf); } - if (atomic_read(&mddev->plug_cnt) == 0) - raid5_activate_delayed(conf); + raid5_activate_delayed(conf); while ((bio = remove_bio_from_retry(conf))) { int ok; @@ -4566,21 +4664,16 @@ static void raid5d(struct mddev *mddev) handled++; } - sh = __get_priority_stripe(conf); - - if (!sh) + batch_size = handle_active_stripes(conf); + if (!batch_size) break; - spin_unlock_irq(&conf->device_lock); - - handled++; - handle_stripe(sh); - release_stripe(sh); - cond_resched(); + handled += batch_size; - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) + if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { + spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); - - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&conf->device_lock); + } } pr_debug("%d stripes handled\n", handled); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2164021f3b5f..a9fc24901eda 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -210,6 +210,7 @@ struct stripe_head { int disks; /* disks in stripe */ enum check_states check_state; enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -273,6 +274,7 @@ enum r5dev_flags { R5_Wantwrite, R5_Overlap, /* There is a pending overlapping request * on this block */ + R5_ReadNoMerge, /* prevent bio from merging in block-layer */ R5_ReadError, /* seen a read error here recently */ R5_ReWrite, /* have tried to over-write the readerror */ @@ -319,6 +321,7 @@ enum { STRIPE_BIOFILL_RUN, STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, + STRIPE_ON_UNPLUG_LIST, }; /* diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c index 664e460f247b..aac622200e99 100644 --- a/drivers/media/dvb/siano/smsusb.c +++ b/drivers/media/dvb/siano/smsusb.c @@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf) return 0; } -static const struct usb_device_id smsusb_id_table[] __devinitconst = { +static const struct usb_device_id smsusb_id_table[] = { { USB_DEVICE(0x187f, 0x0010), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, { USB_DEVICE(0x187f, 0x0100), diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c index d0b6bb507634..72ded29728bb 100644 --- a/drivers/media/radio/radio-shark.c +++ b/drivers/media/radio/radio-shark.c @@ -35,6 +35,11 @@ #include <media/v4l2-device.h> #include <sound/tea575x-tuner.h> +#if defined(CONFIG_LEDS_CLASS) || \ + (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) +#define SHARK_USE_LEDS 1 +#endif + /* * Version Information */ @@ -56,44 +61,18 @@ MODULE_LICENSE("GPL"); enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_blue_pulse(struct led_classdev *led_cdev, - enum led_brightness value); -static void 
shark_led_set_red(struct led_classdev *led_cdev, - enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { - [BLUE_LED] = { - .name = "%s:blue:", - .brightness = LED_OFF, - .max_brightness = 127, - .brightness_set = shark_led_set_blue, - }, - [BLUE_PULSE_LED] = { - .name = "%s:blue-pulse:", - .brightness = LED_OFF, - .max_brightness = 255, - .brightness_set = shark_led_set_blue_pulse, - }, - [RED_LED] = { - .name = "%s:red:", - .brightness = LED_OFF, - .max_brightness = 1, - .brightness_set = shark_led_set_red, - }, -}; - struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct snd_tea575x tea; +#ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; +#endif u8 *transfer_buffer; u32 last_val; @@ -175,20 +154,13 @@ static struct snd_tea575x_ops shark_tea_ops = { .read_val = shark_read_val, }; +#ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; - /* - * We use the v4l2_dev lock and registered bit to ensure the device - * does not get unplugged and unreffed while we're running. - */ - mutex_lock(&shark->tea.mutex); - if (!video_is_registered(&shark->tea.vd)) - goto leave; - for (i = 0; i < 3; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) continue; @@ -208,8 +180,6 @@ static void shark_led_work(struct work_struct *work) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } -leave: - mutex_unlock(&shark->tea.mutex); } static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -245,19 +215,78 @@ static void shark_led_set_red(struct led_classdev *led_cdev, schedule_work(&shark->led_work); } +static const struct led_classdev shark_led_templates[NO_LEDS] = { + [BLUE_LED] = { + .name = "%s:blue:", + .brightness = LED_OFF, + .max_brightness = 127, + .brightness_set = shark_led_set_blue, + }, + [BLUE_PULSE_LED] = { + .name = "%s:blue-pulse:", + .brightness = LED_OFF, + .max_brightness = 255, + .brightness_set = shark_led_set_blue_pulse, + }, + [RED_LED] = { + .name = "%s:red:", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = shark_led_set_red, + }, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + int i, retval; + + INIT_WORK(&shark->led_work, shark_led_work); + for (i = 0; i < NO_LEDS; i++) { + shark->leds[i] = shark_led_templates[i]; + snprintf(shark->led_names[i], sizeof(shark->led_names[0]), + shark->leds[i].name, shark->v4l2_dev.name); + shark->leds[i].name = shark->led_names[i]; + retval = led_classdev_register(dev, &shark->leds[i]); + if (retval) { + v4l2_err(&shark->v4l2_dev, + "couldn't register led: %s\n", + shark->led_names[i]); + return retval; + } + } + return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ + int i; + + for (i = 0; i < NO_LEDS; i++) + led_classdev_unregister(&shark->leds[i]); + + cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + v4l2_warn(&shark->v4l2_dev, + "CONFIG_LED_CLASS not enabled, LED support disabled\n"); + return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif + static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct 
shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - int i; mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); snd_tea575x_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); - for (i = 0; i < NO_LEDS; i++) - led_classdev_unregister(&shark->leds[i]); + shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } @@ -266,7 +295,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - cancel_work_sync(&shark->led_work); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); @@ -276,7 +304,7 @@ static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; - int i, retval = -ENOMEM; + int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) @@ -286,17 +314,13 @@ static int usb_shark_probe(struct usb_interface *intf, if (!shark->transfer_buffer) goto err_alloc_buffer; - /* - * Work around a bug in usbhid/hid-core.c, where it leaves a dangling - * pointer in intfdata causing v4l2-device.c to not set it. Which - * results in usb_shark_disconnect() referencing the dangling pointer - * - * REMOVE (as soon as the above bug is fixed, patch submitted) - */ - usb_set_intfdata(intf, NULL); + v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + + retval = shark_register_leds(shark, &intf->dev); + if (retval) + goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; - v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -320,32 +344,13 @@ static int usb_shark_probe(struct usb_interface *intf, goto err_init_tea; } - INIT_WORK(&shark->led_work, shark_led_work); - for (i = 0; i < NO_LEDS; i++) { - shark->leds[i] = shark_led_templates[i]; - snprintf(shark->led_names[i], sizeof(shark->led_names[0]), - shark->leds[i].name, shark->v4l2_dev.name); - shark->leds[i].name = shark->led_names[i]; - /* - * We don't fail the probe if we fail to register the leds, - * because once we've called snd_tea575x_init, the /dev/radio0 - * node may be opened from userspace holding a reference to us! - * - * Note we cannot register the leds first instead as - * shark_led_work depends on the v4l2 mutex and registered bit. 
- */ - retval = led_classdev_register(&intf->dev, &shark->leds[i]); - if (retval) - v4l2_err(&shark->v4l2_dev, - "couldn't register led: %s\n", - shark->led_names[i]); - } - return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: + shark_unregister_leds(shark); +err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c index b9575de3e7e8..7b4efdfaae28 100644 --- a/drivers/media/radio/radio-shark2.c +++ b/drivers/media/radio/radio-shark2.c @@ -35,6 +35,11 @@ #include <media/v4l2-device.h> #include "radio-tea5777.h" +#if defined(CONFIG_LEDS_CLASS) || \ + (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE)) +#define SHARK_USE_LEDS 1 +#endif + MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver"); MODULE_LICENSE("GPL"); @@ -43,7 +48,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); - #define SHARK_IN_EP 0x83 #define SHARK_OUT_EP 0x05 @@ -54,36 +58,18 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum { BLUE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, - enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { - [BLUE_LED] = { - .name = "%s:blue:", - .brightness = LED_OFF, - .max_brightness = 127, - .brightness_set = shark_led_set_blue, - }, - [RED_LED] = { - .name = "%s:red:", - .brightness = LED_OFF, - .max_brightness = 1, - .brightness_set = shark_led_set_red, - }, -}; - struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct radio_tea5777 tea; +#ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; +#endif u8 *transfer_buffer; }; @@ -161,18 +147,12 @@ static struct radio_tea5777_ops shark_tea_ops = { .read_reg = shark_read_reg, }; +#ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; - /* - * We use the v4l2_dev lock and registered bit to ensure the device - * does not get unplugged and unreffed while we're running. 
- */ - mutex_lock(&shark->tea.mutex); - if (!video_is_registered(&shark->tea.vd)) - goto leave; for (i = 0; i < 2; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) @@ -191,8 +171,6 @@ static void shark_led_work(struct work_struct *work) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } -leave: - mutex_unlock(&shark->tea.mutex); } static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -217,19 +195,72 @@ static void shark_led_set_red(struct led_classdev *led_cdev, schedule_work(&shark->led_work); } +static const struct led_classdev shark_led_templates[NO_LEDS] = { + [BLUE_LED] = { + .name = "%s:blue:", + .brightness = LED_OFF, + .max_brightness = 127, + .brightness_set = shark_led_set_blue, + }, + [RED_LED] = { + .name = "%s:red:", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = shark_led_set_red, + }, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + int i, retval; + + INIT_WORK(&shark->led_work, shark_led_work); + for (i = 0; i < NO_LEDS; i++) { + shark->leds[i] = shark_led_templates[i]; + snprintf(shark->led_names[i], sizeof(shark->led_names[0]), + shark->leds[i].name, shark->v4l2_dev.name); + shark->leds[i].name = shark->led_names[i]; + retval = led_classdev_register(dev, &shark->leds[i]); + if (retval) { + v4l2_err(&shark->v4l2_dev, + "couldn't register led: %s\n", + shark->led_names[i]); + return retval; + } + } + return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ + int i; + + for (i = 0; i < NO_LEDS; i++) + led_classdev_unregister(&shark->leds[i]); + + cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + v4l2_warn(&shark->v4l2_dev, + "CONFIG_LED_CLASS not enabled, LED support disabled\n"); + return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif + static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - int i; mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); radio_tea5777_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); - for (i = 0; i < NO_LEDS; i++) - led_classdev_unregister(&shark->leds[i]); + shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } @@ -238,7 +269,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - cancel_work_sync(&shark->led_work); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); @@ -248,7 +278,7 @@ static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; - int i, retval = -ENOMEM; + int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) @@ -258,17 +288,13 @@ static int usb_shark_probe(struct usb_interface *intf, if (!shark->transfer_buffer) goto err_alloc_buffer; - /* - * Work around a bug in usbhid/hid-core.c, where it leaves a dangling - * pointer in intfdata causing v4l2-device.c to not set it. 
Which - * results in usb_shark_disconnect() referencing the dangling pointer - * - * REMOVE (as soon as the above bug is fixed, patch submitted) - */ - usb_set_intfdata(intf, NULL); + v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + + retval = shark_register_leds(shark, &intf->dev); + if (retval) + goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; - v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -292,32 +318,13 @@ static int usb_shark_probe(struct usb_interface *intf, goto err_init_tea; } - INIT_WORK(&shark->led_work, shark_led_work); - for (i = 0; i < NO_LEDS; i++) { - shark->leds[i] = shark_led_templates[i]; - snprintf(shark->led_names[i], sizeof(shark->led_names[0]), - shark->leds[i].name, shark->v4l2_dev.name); - shark->leds[i].name = shark->led_names[i]; - /* - * We don't fail the probe if we fail to register the leds, - * because once we've called radio_tea5777_init, the /dev/radio0 - * node may be opened from userspace holding a reference to us! - * - * Note we cannot register the leds first instead as - * shark_led_work depends on the v4l2 mutex and registered bit. - */ - retval = led_classdev_register(&intf->dev, &shark->leds[i]); - if (retval) - v4l2_err(&shark->v4l2_dev, - "couldn't register led: %s\n", - shark->led_names[i]); - } - return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: + shark_unregister_leds(shark); +err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c index 9e38132afec6..9bb65e170d99 100644 --- a/drivers/media/radio/si470x/radio-si470x-common.c +++ b/drivers/media/radio/si470x/radio-si470x-common.c @@ -151,6 +151,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 0, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 87500 * 16, @@ -162,6 +163,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 1, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 76000 * 16, @@ -173,6 +175,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 2, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 76000 * 16, diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 643a6ff7c5d0..f867f04cccc9 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -225,8 +225,9 @@ int si470x_vidioc_querycap(struct file *file, void *priv, { strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); - capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | - V4L2_CAP_TUNER | V4L2_CAP_RADIO; + capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | + V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; + capability->capabilities = 
capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 146be4263ea1..be076f7181e7 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -531,7 +531,7 @@ int si470x_vidioc_querycap(struct file *file, void *priv, strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); usb_make_path(radio->usbdev, capability->bus_info, sizeof(capability->bus_info)); - capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | + capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 5180390be7ab..8be57634ba60 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -261,6 +261,7 @@ config IR_WINBOND_CIR config IR_IGUANA tristate "IguanaWorks USB IR Transceiver" + depends on USB_ARCH_HAS_HCD depends on RC_CORE select USB ---help--- diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c index cf9d9fca5b84..234777116e5f 100644 --- a/drivers/media/video/gspca/jl2005bcd.c +++ b/drivers/media/video/gspca/jl2005bcd.c @@ -512,7 +512,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const __devinitdata struct usb_device_id device_table[] = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0227)}, {} }; diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c index 969bb5a4cd93..bab01c86c315 100644 --- a/drivers/media/video/gspca/spca506.c +++ b/drivers/media/video/gspca/spca506.c @@ -579,7 +579,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const struct usb_device_id device_table[] __devinitconst = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06e1, 0xa190)}, /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 {USB_DEVICE(0x0733, 0x0430)}, */ diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c index 7efe9ad7acc7..0b91a5cd38eb 100644 --- a/drivers/media/video/mem2mem_testdev.c +++ b/drivers/media/video/mem2mem_testdev.c @@ -431,7 +431,7 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info)); - cap->capabilities = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c index d2e6f82ecfac..560a65aa7038 100644 --- a/drivers/media/video/mx1_camera.c +++ b/drivers/media/video/mx1_camera.c @@ -403,7 +403,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev) dev_dbg(pcdev->icd->parent, "Activate device\n"); - clk_enable(pcdev->clk); + clk_prepare_enable(pcdev->clk); /* enable CSI before doing anything else */ __raw_writel(csicr1, pcdev->base + CSICR1); @@ -422,7 +422,7 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev) /* Disable all CSI interface */ __raw_writel(0x00, pcdev->base + CSICR1); - clk_disable(pcdev->clk); + clk_disable_unprepare(pcdev->clk); } /* diff --git 
a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c index 637bde8aca28..ac175406e582 100644 --- a/drivers/media/video/mx2_camera.c +++ b/drivers/media/video/mx2_camera.c @@ -272,7 +272,7 @@ struct mx2_camera_dev { struct device *dev; struct soc_camera_host soc_host; struct soc_camera_device *icd; - struct clk *clk_csi, *clk_emma; + struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg; unsigned int irq_csi, irq_emma; void __iomem *base_csi, *base_emma; @@ -407,7 +407,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev) { unsigned long flags; - clk_disable(pcdev->clk_csi); + clk_disable_unprepare(pcdev->clk_csi); writel(0, pcdev->base_csi + CSICR1); if (cpu_is_mx27()) { writel(0, pcdev->base_emma + PRP_CNTL); @@ -435,7 +435,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd) if (pcdev->icd) return -EBUSY; - ret = clk_enable(pcdev->clk_csi); + ret = clk_prepare_enable(pcdev->clk_csi); if (ret < 0) return ret; @@ -1633,23 +1633,34 @@ static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev) goto exit_iounmap; } - pcdev->clk_emma = clk_get(NULL, "emma"); - if (IS_ERR(pcdev->clk_emma)) { - err = PTR_ERR(pcdev->clk_emma); + pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg"); + if (IS_ERR(pcdev->clk_emma_ipg)) { + err = PTR_ERR(pcdev->clk_emma_ipg); goto exit_free_irq; } - clk_enable(pcdev->clk_emma); + clk_prepare_enable(pcdev->clk_emma_ipg); + + pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb"); + if (IS_ERR(pcdev->clk_emma_ahb)) { + err = PTR_ERR(pcdev->clk_emma_ahb); + goto exit_clk_emma_ipg_put; + } + + clk_prepare_enable(pcdev->clk_emma_ahb); err = mx27_camera_emma_prp_reset(pcdev); if (err) - goto exit_clk_emma_put; + goto exit_clk_emma_ahb_put; return err; -exit_clk_emma_put: - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); +exit_clk_emma_ahb_put: + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); +exit_clk_emma_ipg_put: + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); exit_free_irq: free_irq(pcdev->irq_emma, pcdev); exit_iounmap: @@ -1685,7 +1696,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev) goto exit; } - pcdev->clk_csi = clk_get(&pdev->dev, NULL); + pcdev->clk_csi = clk_get(&pdev->dev, "ahb"); if (IS_ERR(pcdev->clk_csi)) { dev_err(&pdev->dev, "Could not get csi clock\n"); err = PTR_ERR(pcdev->clk_csi); @@ -1785,8 +1796,10 @@ exit_free_emma: eallocctx: if (cpu_is_mx27()) { free_irq(pcdev->irq_emma, pcdev); - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); iounmap(pcdev->base_emma); release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma)); } @@ -1825,8 +1838,10 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev) iounmap(pcdev->base_csi); if (cpu_is_mx27()) { - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); iounmap(pcdev->base_emma); res = pcdev->res_emma; release_mem_region(res->start, resource_size(res)); diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index f13643d31353..af2297dd49c8 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -61,15 +61,9 @@ #define MAX_VIDEO_MEM 16 -enum 
csi_buffer_state { - CSI_BUF_NEEDS_INIT, - CSI_BUF_PREPARED, -}; - struct mx3_camera_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_buffer vb; - enum csi_buffer_state state; struct list_head queue; /* One descriptot per scatterlist (per frame) */ @@ -285,7 +279,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) goto error; } - if (buf->state == CSI_BUF_NEEDS_INIT) { + if (!buf->txd) { sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); sg_dma_len(sg) = new_size; @@ -298,7 +292,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) txd->callback_param = txd; txd->callback = mx3_cam_dma_done; - buf->state = CSI_BUF_PREPARED; buf->txd = txd; } else { txd = buf->txd; @@ -385,7 +378,6 @@ static void mx3_videobuf_release(struct vb2_buffer *vb) /* Doesn't hurt also if the list is empty */ list_del_init(&buf->queue); - buf->state = CSI_BUF_NEEDS_INIT; if (txd) { buf->txd = NULL; @@ -405,13 +397,13 @@ static int mx3_videobuf_init(struct vb2_buffer *vb) struct mx3_camera_dev *mx3_cam = ici->priv; struct mx3_camera_buffer *buf = to_mx3_vb(vb); - /* This is for locking debugging only */ - INIT_LIST_HEAD(&buf->queue); - sg_init_table(&buf->sg, 1); + if (!buf->txd) { + /* This is for locking debugging only */ + INIT_LIST_HEAD(&buf->queue); + sg_init_table(&buf->sg, 1); - buf->state = CSI_BUF_NEEDS_INIT; - - mx3_cam->buf_total += vb2_plane_size(vb, 0); + mx3_cam->buf_total += vb2_plane_size(vb, 0); + } return 0; } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index b03ffecb7438..1bde255e45df 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -171,7 +171,8 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd, dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n", pixfmtstr(pix->pixelformat), pix->width, pix->height); - if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { + if (pix->pixelformat != V4L2_PIX_FMT_JPEG && + !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { pix->bytesperline = 0; pix->sizeimage = 0; } diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c index 89dce097a827..a397812635d6 100644 --- a/drivers/media/video/soc_mediabus.c +++ b/drivers/media/video/soc_mediabus.c @@ -378,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) { + if (mf->fourcc == V4L2_PIX_FMT_JPEG) + return 0; + if (mf->layout != SOC_MBUS_LAYOUT_PACKED) return width * mf->bits_per_sample / 8; @@ -400,6 +403,9 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height) { + if (mf->fourcc == V4L2_PIX_FMT_JPEG) + return 0; + if (mf->layout == SOC_MBUS_LAYOUT_PACKED) return bytes_per_line * height; diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c index 9288fbd5001b..5577381b5bf0 100644 --- a/drivers/media/video/uvc/uvc_queue.c +++ b/drivers/media/video/uvc/uvc_queue.c @@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { buf->error = 0; buf->state = UVC_BUF_STATE_QUEUED; + buf->bytesused = 0; vb2_set_plane_payload(&buf->buf, 0, 0); return buf; } diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index c3b7b5f59b32..6bc47fc82fe2 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -402,8 +402,10 @@ static void 
v4l_print_hw_freq_seek(const void *arg, bool write_only) { const struct v4l2_hw_freq_seek *p = arg; - pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n", - p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing); + pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, " + "rangelow=%u, rangehigh=%u\n", + p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, + p->rangelow, p->rangehigh); } static void v4l_print_requestbuffers(const void *arg, bool write_only) @@ -1853,6 +1855,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, .type = type, }; + if (p->index) + return -EINVAL; err = ops->vidioc_g_tuner(file, fh, &t); if (err) return err; @@ -1870,6 +1874,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, if (type != V4L2_TUNER_RADIO) return -EINVAL; + if (p->index) + return -EINVAL; err = ops->vidioc_g_modulator(file, fh, &m); if (err) return err; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index d1facef28a60..b1a146205c08 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -395,7 +395,8 @@ config MFD_TC6387XB config MFD_TC6393XB bool "Support Toshiba TC6393XB" - depends on GPIOLIB && ARM && HAVE_CLK + depends on ARM && HAVE_CLK + select GPIOLIB select MFD_CORE select MFD_TMIO help diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 383421bf5760..683e18a23329 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -925,6 +925,7 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, goto out; } + ret = 0; if (pdata->leds) { int i; diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 43a76c41cfcc..db662e2dcfa5 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work) } local_irq_enable(); ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq))); + } while (gpio_get_value(pdata->gpio)); } static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index c6ffbbe5a6c0..d78c05e693f7 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -1253,7 +1253,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list, if (dev->wd_timeout) *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); else - *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); + *slots -= mei_data2slots(MEI_WD_PARAMS_SIZE); } } if (dev->stop) diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 092330208869..7422c7652845 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -925,6 +925,27 @@ static struct miscdevice mei_misc_device = { }; /** + * mei_quirk_probe - probe for devices that doesn't valid ME interface + * @pdev: PCI device structure + * @ent: entry into pci_device_table + * + * returns true if ME Interface is valid, false otherwise + */ +static bool __devinit mei_quirk_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + u32 reg; + if (ent->device == MEI_DEV_ID_PBG_1) { + pci_read_config_dword(pdev, 0x48, &reg); + /* make sure that bit 9 is up and bit 10 is down */ + if ((reg & 0x600) == 0x200) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; + } + } + return true; +} +/** * mei_probe - Device Initialization Routine * * @pdev: PCI device structure @@ -939,6 +960,12 @@ static int __devinit mei_probe(struct pci_dev *pdev, int err;
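/*
 * Note on the quirk above (illustration, not part of the patch): 0x600
 * masks bits 9 and 10 of the PCI config dword read from offset 0x48,
 * so (reg & 0x600) == 0x200 holds exactly when bit 9 (0x200) is set
 * and bit 10 (0x400) is clear.  For MEI_DEV_ID_PBG_1 that combination
 * means no valid ME interface is exposed, and mei_probe() below bails
 * out with -ENODEV.
 */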
mutex_lock(&mei_mutex); + + if (!mei_quirk_probe(pdev, ent)) { + err = -ENODEV; + goto end; + } + if (mei_device) { err = -EEXIST; goto end; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 87b251ab6ec5..b9e2000969f0 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -18,6 +18,8 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/cpu.h> +#include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/uv/uv_hub.h> @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" +static int xpc_mq_node = -1; + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); - if (mq->irq < 0) { - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - -mq->irq); + if (mq->irq < 0) return mq->irq; - } mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - pg_order); + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, }; +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + int xpc_init_uv(void) { + int nid; + int ret = 0; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1742,21 +1785,21 @@ xpc_init_uv(void) return -E2BIG; } - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, - XPC_ACTIVATE_IRQ_NAME, - xpc_handle_activate_IRQ_uv); - if (IS_ERR(xpc_activate_mq_uv)) - return PTR_ERR(xpc_activate_mq_uv); + if (xpc_mq_node < 0) + for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, - XPC_NOTIFY_IRQ_NAME, - xpc_handle_notify_IRQ_uv); - if (IS_ERR(xpc_notify_mq_uv)) { - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - return PTR_ERR(xpc_notify_mq_uv); - } + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); - return 0; + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; } 
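/*
 * Usage sketch for the new xpc_mq_node parameter (illustration only;
 * assumes the message queues are built into the xpc module):
 *
 *   modprobe xpc xpc_mq_node=1
 *
 * restricts allocation of both GRU message queues to node 1.  With the
 * default of -1, xpc_init_uv() above falls back to trying each online
 * node in turn until both queues can be created on one of them.
 */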
void @@ -1765,3 +1808,6 @@ xpc_exit_uv(void) xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c index 1ff460a8e9c7..93b4d67cc4a3 100644 --- a/drivers/misc/ti-st/st_ll.c +++ b/drivers/misc/ti-st/st_ll.c @@ -87,7 +87,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data) /* communicate to platform about chip wakeup */ kim_data = st_data->kim_data; pdata = kim_data->kim_pdev->dev.platform_data; - if (pdata->chip_asleep) + if (pdata->chip_awake) pdata->chip_awake(NULL); } diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 3e8dcf8d2e05..50e08f03aa65 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -17,10 +17,12 @@ #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/timer.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/clk.h> @@ -128,6 +130,10 @@ struct mmc_omap_host { unsigned char id; /* 16xx chips have 2 MMC blocks */ struct clk * iclk; struct clk * fclk; + struct dma_chan *dma_rx; + u32 dma_rx_burst; + struct dma_chan *dma_tx; + u32 dma_tx_burst; struct resource *mem_res; void __iomem *virt_base; unsigned int phys_base; @@ -153,12 +159,8 @@ struct mmc_omap_host { unsigned use_dma:1; unsigned brs_received:1, dma_done:1; - unsigned dma_is_read:1; unsigned dma_in_use:1; - int dma_ch; spinlock_t dma_lock; - struct timer_list dma_timer; - unsigned dma_len; struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; struct mmc_omap_slot *current_slot; @@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, int abort) { enum dma_data_direction dma_data_dir; + struct device *dev = mmc_dev(host->mmc); + struct dma_chan *c; - BUG_ON(host->dma_ch < 0); - if (data->error) - omap_stop_dma(host->dma_ch); - /* Release DMA channel lazily */ - mod_timer(&host->dma_timer, jiffies + HZ); - if (data->flags & MMC_DATA_WRITE) + if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; - else + c = host->dma_tx; + } else { dma_data_dir = DMA_FROM_DEVICE; - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, - dma_data_dir); + c = host->dma_rx; + } + if (c) { + if (data->error) { + dmaengine_terminate_all(c); + /* Claim nothing transferred on error... 
*/ + data->bytes_xfered = 0; + } + dev = c->device->dev; + } + dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); } static void mmc_omap_send_stop_work(struct work_struct *work) @@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) } static void -mmc_omap_dma_timer(unsigned long data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - - BUG_ON(host->dma_ch < 0); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; -} - -static void mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) { unsigned long flags; @@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param) jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); } -/* Prepare to transfer the next segment of a scatterlist */ -static void -mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) +static void mmc_omap_dma_callback(void *priv) { - int dma_ch = host->dma_ch; - unsigned long data_addr; - u16 buf, frame; - u32 count; - struct scatterlist *sg = &data->sg[host->sg_idx]; - int src_port = 0; - int dst_port = 0; - int sync_dev = 0; - - data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); - frame = data->blksz; - count = sg_dma_len(sg); - - if ((data->blocks == 1) && (count > data->blksz)) - count = frame; - - host->dma_len = count; - - /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. - * Use 16 or 32 word frames when the blocksize is at least that large. - * Blocksize is usually 512 bytes; but not for some SD reads. - */ - if (cpu_is_omap15xx() && frame > 32) - frame = 32; - else if (frame > 64) - frame = 64; - count /= frame; - frame >>= 1; - - if (!(data->flags & MMC_DATA_WRITE)) { - buf = 0x800f | ((frame - 1) << 8); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_TIPB; - dst_port = OMAP_DMA_PORT_EMIFF; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_RX; - - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_dest_data_pack(dma_ch, 1); - omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } else { - buf = 0x0f80 | ((frame - 1) << 0); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_EMIFF; - dst_port = OMAP_DMA_PORT_TIPB; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_TX; - - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_src_data_pack(dma_ch, 1); - omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } + struct mmc_omap_host *host = priv; + struct mmc_data *data = host->data; - /* Max limit for DMA frame count is 0xffff */ - BUG_ON(count > 0xffff); + /* If we got to the end of DMA, assume everything went well */ + data->bytes_xfered += data->blocks * data->blksz; - OMAP_MMC_WRITE(host, BUF, buf); - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, - frame, count, OMAP_DMA_SYNC_FRAME, - sync_dev, 0); -} - -/* A scatterlist segment completed */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - struct mmc_data *mmcdat = host->data; - - if (unlikely(host->dma_ch < 0)) { - dev_err(mmc_dev(host->mmc), - "DMA callback while DMA not enabled\n"); - return; - } - /* FIXME: We really should do something to _handle_ the errors */ - if (ch_status & 
OMAP1_DMA_TOUT_IRQ) { - dev_err(mmc_dev(host->mmc),"DMA timeout\n"); - return; - } - if (ch_status & OMAP_DMA_DROP_IRQ) { - dev_err(mmc_dev(host->mmc), "DMA sync error\n"); - return; - } - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - return; - } - mmcdat->bytes_xfered += host->dma_len; - host->sg_idx++; - if (host->sg_idx < host->sg_len) { - mmc_omap_prepare_dma(host, host->data); - omap_start_dma(host->dma_ch); - } else - mmc_omap_dma_done(host, host->data); -} - -static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) -{ - const char *dma_dev_name; - int sync_dev, dma_ch, is_read, r; - - is_read = !(data->flags & MMC_DATA_WRITE); - del_timer_sync(&host->dma_timer); - if (host->dma_ch >= 0) { - if (is_read == host->dma_is_read) - return 0; - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - } - - if (is_read) { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_RX; - dma_dev_name = "MMC1 read"; - } else { - sync_dev = OMAP_DMA_MMC2_RX; - dma_dev_name = "MMC2 read"; - } - } else { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_TX; - dma_dev_name = "MMC1 write"; - } else { - sync_dev = OMAP_DMA_MMC2_TX; - dma_dev_name = "MMC2 write"; - } - } - r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, - host, &dma_ch); - if (r != 0) { - dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); - return r; - } - host->dma_ch = dma_ch; - host->dma_is_read = is_read; - - return 0; + mmc_omap_dma_done(host, data); } static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) @@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) host->sg_idx = 0; if (use_dma) { - if (mmc_omap_get_dma_channel(host, data) == 0) { - enum dma_data_direction dma_data_dir; - - if (data->flags & MMC_DATA_WRITE) - dma_data_dir = DMA_TO_DEVICE; - else - dma_data_dir = DMA_FROM_DEVICE; - - host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - sg_len, dma_data_dir); - host->total_bytes_left = 0; - mmc_omap_prepare_dma(host, req->data); - host->brs_received = 0; - host->dma_done = 0; - host->dma_in_use = 1; - } else - use_dma = 0; + enum dma_data_direction dma_data_dir; + struct dma_async_tx_descriptor *tx; + struct dma_chan *c; + u32 burst, *bp; + u16 buf; + + /* + * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx + * and 24xx. Use 16 or 32 word frames when the + * blocksize is at least that large. Blocksize is + * usually 512 bytes; but not for some SD reads. + */ + burst = cpu_is_omap15xx() ? 
32 : 64; + if (burst > data->blksz) + burst = data->blksz; + + burst >>= 1; + + if (data->flags & MMC_DATA_WRITE) { + c = host->dma_tx; + bp = &host->dma_tx_burst; + buf = 0x0f80 | (burst - 1) << 0; + dma_data_dir = DMA_TO_DEVICE; + } else { + c = host->dma_rx; + bp = &host->dma_rx_burst; + buf = 0x800f | (burst - 1) << 8; + dma_data_dir = DMA_FROM_DEVICE; + } + + if (!c) + goto use_pio; + + /* Only reconfigure if we have a different burst size */ + if (*bp != burst) { + struct dma_slave_config cfg; + + cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.src_maxburst = burst; + cfg.dst_maxburst = burst; + + if (dmaengine_slave_config(c, &cfg)) + goto use_pio; + + *bp = burst; + } + + host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, + dma_data_dir); + if (host->sg_len == 0) + goto use_pio; + + tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto use_pio; + + OMAP_MMC_WRITE(host, BUF, buf); + + tx->callback = mmc_omap_dma_callback; + tx->callback_param = host; + dmaengine_submit(tx); + host->brs_received = 0; + host->dma_done = 0; + host->dma_in_use = 1; + return; } + use_pio: /* Revert to PIO? */ - if (!use_dma) { - OMAP_MMC_WRITE(host, BUF, 0x1f1f); - host->total_bytes_left = data->blocks * block_size; - host->sg_len = sg_len; - mmc_omap_sg_to_buf(host); - host->dma_in_use = 0; - } + OMAP_MMC_WRITE(host, BUF, 0x1f1f); + host->total_bytes_left = data->blocks * block_size; + host->sg_len = sg_len; + mmc_omap_sg_to_buf(host); + host->dma_in_use = 0; } static void mmc_omap_start_request(struct mmc_omap_host *host, @@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host, /* only touch fifo AFTER the controller readies it */ mmc_omap_prepare_data(host, req); mmc_omap_start_command(host, req->cmd); - if (host->dma_in_use) - omap_start_dma(host->dma_ch); + if (host->dma_in_use) { + struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 
+ host->dma_tx : host->dma_rx; + + dma_async_issue_pending(c); + } } static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) @@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_omap_host *host = NULL; struct resource *res; + dma_cap_mask_t mask; + unsigned sig; int i, ret = 0; int irq; @@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); spin_lock_init(&host->dma_lock); - setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); spin_lock_init(&host->slot_lock); init_waitqueue_head(&host->slot_wq); @@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) host->id = pdev->id; host->mem_res = res; host->irq = irq; - host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; - host->dma_ch = -1; - host->irq = irq; host->phys_base = host->mem_res->start; host->virt_base = ioremap(res->start, resource_size(res)); @@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) goto err_free_iclk; } + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma_tx_burst = -1; + host->dma_rx_burst = -1; + + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; + else + sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; + host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_tx) { + dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_tx) + dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); +#endif + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; + else + sig = host->id == 0 ? 
OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; + host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_rx) { + dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_rx) + dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); +#endif + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); if (ret) - goto err_free_fclk; + goto err_free_dma; if (pdata->init != NULL) { ret = pdata->init(&pdev->dev); @@ -1510,7 +1457,11 @@ err_plat_cleanup: pdata->cleanup(&pdev->dev); err_free_irq: free_irq(host->irq, host); -err_free_fclk: +err_free_dma: + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); clk_put(host->fclk); err_free_iclk: clk_disable(host->iclk); @@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev) clk_disable(host->iclk); clk_put(host->iclk); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); + iounmap(host->virt_base); release_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index bc28627af66b..3a09f93cc3b6 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/debugfs.h> +#include <linux/dmaengine.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -29,6 +30,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_device.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/core.h> #include <linux/mmc/mmc.h> @@ -37,7 +39,6 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> -#include <plat/dma.h> #include <mach/hardware.h> #include <plat/board.h> #include <plat/mmc.h> @@ -166,7 +167,8 @@ struct omap_hsmmc_host { int suspended; int irq; int use_dma, dma_ch; - int dma_line_tx, dma_line_rx; + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; int slot_id; int response_busy; int context_loss; @@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? 
host->tx_chan : host->rx_chan; +} + static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) { int dma_ch; @@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) spin_unlock_irqrestore(&host->irq_lock, flags); if (host->use_dma && dma_ch != -1) { - dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, - host->data->sg_len, + struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, + host->data->sg, host->data->sg_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(dma_ch); + host->data->host_cookie = 0; } host->data = NULL; @@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) return IRQ_HANDLED; } -static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, - struct mmc_data *data) -{ - int sync_dev; - - if (data->flags & MMC_DATA_WRITE) - sync_dev = host->dma_line_tx; - else - sync_dev = host->dma_line_rx; - return sync_dev; -} - -static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, - struct mmc_data *data, - struct scatterlist *sgl) -{ - int blksz, nblk, dma_ch; - - dma_ch = host->dma_ch; - if (data->flags & MMC_DATA_WRITE) { - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } else { - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } - - blksz = host->data->blksz; - nblk = sg_dma_len(sgl) / blksz; - - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, - blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, - omap_hsmmc_get_dma_sync_dev(host, data), - !(data->flags & MMC_DATA_WRITE)); - - omap_start_dma(dma_ch); -} - -/* - * DMA call back function - */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) +static void omap_hsmmc_dma_callback(void *param) { - struct omap_hsmmc_host *host = cb_data; + struct omap_hsmmc_host *host = param; + struct dma_chan *chan; struct mmc_data *data; - int dma_ch, req_in_progress; - unsigned long flags; - - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", - ch_status); - return; - } + int req_in_progress; - spin_lock_irqsave(&host->irq_lock, flags); + spin_lock_irq(&host->irq_lock); if (host->dma_ch < 0) { - spin_unlock_irqrestore(&host->irq_lock, flags); + spin_unlock_irq(&host->irq_lock); return; } data = host->mrq->data; - host->dma_sg_idx++; - if (host->dma_sg_idx < host->dma_len) { - /* Fire up the next transfer. 
*/ - omap_hsmmc_config_dma_params(host, data, - data->sg + host->dma_sg_idx); - spin_unlock_irqrestore(&host->irq_lock, flags); - return; - } - + chan = omap_hsmmc_get_dma_chan(host, data); if (!data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); req_in_progress = host->req_in_progress; - dma_ch = host->dma_ch; host->dma_ch = -1; - spin_unlock_irqrestore(&host->irq_lock, flags); - - omap_free_dma(dma_ch); + spin_unlock_irq(&host->irq_lock); /* If DMA has finished after TC, complete the request */ if (!req_in_progress) { @@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, struct mmc_data *data, - struct omap_hsmmc_next *next) + struct omap_hsmmc_next *next, + struct dma_chan *chan) { int dma_len; @@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, /* Check if next job is already prepared */ if (next || (!next && data->host_cookie != host->next_data.cookie)) { - dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, + dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); } else { @@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, i; + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *tx; + int ret = 0, i; struct mmc_data *data = req->data; + struct dma_chan *chan; /* Sanity check: all the SG entries must be aligned by block size. */ for (i = 0; i < data->sg_len; i++) { @@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, BUG_ON(host->dma_ch != -1); - ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), - "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); - if (ret != 0) { - dev_err(mmc_dev(host->mmc), - "%s: omap_request_dma() failed with %d\n", - mmc_hostname(host->mmc), ret); + chan = omap_hsmmc_get_dma_chan(host, data); + + cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) return ret; - } - ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); + + ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); if (ret) return ret; - host->dma_ch = dma_ch; - host->dma_sg_idx = 0; + tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, + data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + /* FIXME: cleanup */ + return -1; + } + + tx->callback = omap_hsmmc_dma_callback; + tx->callback_param = host; - omap_hsmmc_config_dma_params(host, data, data->sg); + /* Does not fail */ + dmaengine_submit(tx); + + host->dma_ch = 1; + + dma_async_issue_pending(chan); return 0; } @@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, struct omap_hsmmc_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - if (host->use_dma) { - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, - omap_hsmmc_get_dma_dir(host, data)); + if (host->use_dma && data->host_cookie) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + + dma_unmap_sg(c->device->dev, data->sg, data->sg_len, + omap_hsmmc_get_dma_dir(host, data)); data->host_cookie = 0; } } @@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, return ; } - if (host->use_dma) + if (host->use_dma) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + if (omap_hsmmc_pre_dma_transfer(host, mrq->data, - &host->next_data)) + &host->next_data, c)) mrq->data->host_cookie = 0; + } } /* @@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) struct resource *res; int ret, irq; const struct of_device_id *match; + dma_cap_mask_t mask; + unsigned tx_req, rx_req; match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); if (match) { @@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) host->pdata = pdata; host->dev = &pdev->dev; host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; host->dma_ch = -1; host->irq = irq; host->slot_id = 0; @@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_tx = res->start; + tx_req = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!res) { @@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_rx = res->start; + rx_req = res->start; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); + if (!host->rx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); + ret = -ENXIO; + goto err_irq; + } + + host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); + if (!host->tx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); + ret = -ENXIO; + goto err_irq; + } /* Request IRQ for MMC operations */ ret = request_irq(host->irq, omap_hsmmc_irq, 0, @@ -2021,6 +2014,10 @@ err_reg: err_irq_cd_init: free_irq(host->irq, host); err_irq: + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); clk_put(host->fclk); @@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev) if (mmc_slot(host).card_detect_irq) free_irq(mmc_slot(host).card_detect_irq, host); + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); + pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); 
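[Editor's note, not part of the patch] The omap, omap_hsmmc and (below) omap2 NAND conversions in this series all drop the legacy omap_request_dma()/omap_set_dma_*() calls in favour of the generic dmaengine slave API, and they all follow the same sequence: configure the channel for the device FIFO, prepare a slave scatter-gather descriptor, submit it, then issue pending work. A minimal sketch of that sequence is shown here for reference; the FIFO address and burst sizes are placeholders, not values taken from the patches.

/* Sketch only: the dmaengine slave sequence these drivers now share. */
static int start_slave_dma(struct dma_chan *chan, struct scatterlist *sg,
			   unsigned int sg_len, dma_addr_t fifo, bool to_device)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo,
		.dst_addr	= fifo,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* program FIFO address and burst */
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, sg, sg_len,
				     to_device ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;	/* callers above fall back to PIO at this point */

	/* a completion callback would normally be set on tx here */
	dmaengine_submit(tx);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* start the transfer */
	return 0;
}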
clk_put(host->fclk); diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index cfff454f628b..c3bb304eca07 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c @@ -19,14 +19,13 @@ #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> +#include <asm/sections.h> /****************************************************************************/ -extern char _ebss; - struct map_info uclinux_ram_map = { .name = "RAM", - .phys = (unsigned long)&_ebss, + .phys = (unsigned long)__bss_stop, .size = 0, }; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 31bb7e5b504a..8ca417614c57 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -480,7 +480,7 @@ config MTD_NAND_NANDSIM config MTD_NAND_GPMI_NAND bool "GPMI NAND Flash Controller driver" - depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q) + depends on MTD_NAND && MXS_DMA help Enables NAND Flash support for IMX23 or IMX28. The GPMI controller is very powerful, with the help of BCH diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index d7f681d0c9b9..ac4fd756eda3 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -9,6 +9,7 @@ */ #include <linux/platform_device.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h> @@ -18,6 +19,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> +#include <linux/omap-dma.h> #include <linux/io.h> #include <linux/slab.h> @@ -123,7 +125,7 @@ struct omap_nand_info { int gpmc_cs; unsigned long phys_base; struct completion comp; - int dma_ch; + struct dma_chan *dma; int gpmc_irq; enum { OMAP_NAND_IO_READ = 0, /* read */ @@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* - * omap_nand_dma_cb: callback on the completion of dma transfer - * @lch: logical channel - * @ch_satuts: channel status + * omap_nand_dma_callback: callback on the completion of dma transfer * @data: pointer to completion data structure */ -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) +static void omap_nand_dma_callback(void *data) { complete((struct completion *) data); } @@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); + struct dma_async_tx_descriptor *tx; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - dma_addr_t dma_addr; - int ret; + struct scatterlist sg; unsigned long tim, limit; - - /* The fifo depth is 64 bytes max. - * But configure the FIFO-threahold to 32 to get a sync at each frame - * and frame length is 32 bytes. 
- */ - int buf_len = len >> 6; + unsigned n; + int ret; if (addr >= high_memory) { struct page *p1; @@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); } - dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); - if (dma_mapping_error(&info->pdev->dev, dma_addr)) { + sg_init_one(&sg, addr, len); + n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); + if (n == 0) { dev_err(&info->pdev->dev, "Couldn't DMA map a %d byte buffer\n", len); goto out_copy; } - if (is_write) { - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); - } else { - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); - } - /* configure and start prefetch transfer */ + tx = dmaengine_prep_slave_sg(info->dma, &sg, n, + is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto out_copy_unmap; + + tx->callback = omap_nand_dma_callback; + tx->callback_param = &info->comp; + dmaengine_submit(tx); + + /* configure and start prefetch transfer */ ret = gpmc_prefetch_enable(info->gpmc_cs, - PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) /* PFPW engine is busy, use cpu copy method */ goto out_copy_unmap; init_completion(&info->comp); - - omap_start_dma(info->dma_ch); + dma_async_issue_pending(info->dma); /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); @@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); return 0; out_copy_unmap: - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); out_copy: if (info->nand.options & NAND_BUSWIDTH_16) is_write == 0 ? 
omap_read_buf16(mtd, (u_char *) addr, len) @@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_platform_data *pdata; int err; int i, offset; + dma_cap_mask_t mask; + unsigned sig; pdata = pdev->dev.platform_data; if (pdata == NULL) { @@ -1244,18 +1235,30 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) break; case NAND_OMAP_PREFETCH_DMA: - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - dev_err(&pdev->dev, "DMA request failed!\n"); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = OMAP24XX_DMA_GPMC; + info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!info->dma) { + dev_err(&pdev->dev, "DMA engine request failed\n"); + err = -ENXIO; goto out_release_mem_region; } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - + struct dma_slave_config cfg; + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = info->phys_base; + cfg.dst_addr = info->phys_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 16; + cfg.dst_maxburst = 16; + err = dmaengine_slave_config(info->dma, &cfg); + if (err) { + dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", + err); + goto out_release_mem_region; + } info->nand.read_buf = omap_read_buf_dma_pref; info->nand.write_buf = omap_write_buf_dma_pref; } @@ -1358,6 +1361,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) return 0; out_release_mem_region: + if (info->dma) + dma_release_channel(info->dma); release_mem_region(info->phys_base, NAND_IO_SIZE); out_free_info: kfree(info); @@ -1373,8 +1378,8 @@ static int omap_nand_remove(struct platform_device *pdev) omap3_free_bch(&info->mtd); platform_set_drvdata(pdev, NULL); - if (info->dma_ch != -1) - omap_free_dma(info->dma_ch); + if (info->dma) + dma_release_channel(info->dma); if (info->gpmc_irq) free_irq(info->gpmc_irq, info); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 545c09ed9079..cff6f023c03a 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -996,9 +996,7 @@ static int __init cops_module_init(void) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); - if (IS_ERR(cops_dev)) - return PTR_ERR(cops_dev); - return 0; + return PTR_RET(cops_dev); } static void __exit cops_module_exit(void) diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 0910dce3996d..b5782cdf0bca 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void) "ltpc: Autoprobing is not recommended for modules\n"); dev_ltpc = ltpc_probe(); - if (IS_ERR(dev_ltpc)) - return PTR_ERR(dev_ltpc); - return 0; + return PTR_RET(dev_ltpc); } module_init(ltpc_module_init); #endif diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6fae5f3ec7f6..d688a8af432c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; - if (unlikely(netpoll_tx_running(slave_dev))) + if 
(unlikely(netpoll_tx_running(bond->dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); else dev_queue_xmit(skb); @@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave) struct netpoll *np; int err = 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), GFP_ATOMIC); err = -ENOMEM; if (!np) goto out; - err = __netpoll_setup(np, slave->dev); + err = __netpoll_setup(np, slave->dev, GFP_ATOMIC); if (err) { kfree(np); goto out; @@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave) return; slave->np = NULL; - synchronize_rcu_bh(); - __netpoll_cleanup(np); - kfree(np); + __netpoll_free_rcu(np); } static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) { @@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) read_unlock(&bond->lock); } -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) { struct bonding *bond = netdev_priv(dev); struct slave *slave; diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index f0c8bd54ce29..021d69c5d9bc 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1712,7 +1712,7 @@ e100_set_network_leds(int active) static void e100_netpoll(struct net_device* netdev) { - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); } #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 77bcd4cb4ffb..463b9ec57d80 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1278,7 +1278,7 @@ struct bnx2x { #define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index dd451c3dd83d..02b5a343b195 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } -/* - * Reset the load status for the current engine. - */ -static void bnx2x_clear_load_status(struct bnx2x *bp) -{ - u32 val; - u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); - val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); -} - static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? 
", " : "", blk); @@ -9384,32 +9370,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) return rc; } -static bool __devinit bnx2x_can_flr(struct bnx2x *bp) -{ - int pos; - u32 cap; - struct pci_dev *dev = bp->pdev; - - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return false; - - return true; -} - static int __devinit bnx2x_do_flr(struct bnx2x *bp) { int i, pos; u16 status; struct pci_dev *dev = bp->pdev; - /* probe the capability first */ - if (bnx2x_can_flr(bp)) - return -ENOTTY; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } pos = pci_pcie_cap(dev); if (!pos) @@ -9429,12 +9407,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp) "transaction is not cleared; proceeding with reset anyway\n"); clear: - if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { - BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", - bp->common.bc_ver); - return -EINVAL; - } + BNX2X_DEV_INFO("Initiating FLR\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); return 0; @@ -9454,8 +9428,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ - if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) - return bnx2x_do_flr(bp); + rc = bnx2x_test_firmware_version(bp, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bnx2x_prev_mcp_done(bp); @@ -11427,9 +11414,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - /* Reset the load counter */ - bnx2x_clear_load_status(bp); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..62f754bd0dfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, break; default: + kfree(new_cmd); BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c60de89b6669..90a903d83d87 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs != MAX_RX_QS) dev_info(&adapter->pdev->dev, - "Created only %d receive queues", adapter->num_rx_qs); + "Created only %d receive queues\n", adapter->num_rx_qs); return 0; } diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a710909..151453309401 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ 
-174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e6..cdf702a59485 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); fec->fecp = ioremap(res.start, resource_size(&res)); - if (!fec->fecp) + if (!fec->fecp) { + ret = -ENOMEM; goto out_fec; + } if (get_bus_freq) { clock = get_bus_freq(ofdev->dev.of_node); @@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 0b3bade957fd..080c89093feb 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext, eecd; + u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; /* @@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); e1e_flush(); usleep_range(10000, 20000); @@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. 
*/ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 95b245310f17..46c3b1f9ff89 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + /* * e1000e_dump - Print registers, Tx-ring and Rx-ring */ @@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); } /* Print Rx Ring Summary */ @@ -381,10 +399,8 @@ rx_ring_summary: buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_ps_bsize0, true); + e1000e_dump_ps_pages(adapter, + buffer_info); } } break; @@ -444,12 +460,12 @@ rx_ring_summary: (unsigned long long)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) + if (netif_msg_pktdata(adapter) && + buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt - (buffer_info->dma), + buffer_info->skb->data, adapter->rx_buffer_len, true); } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 5e84eaac48c1..ba994fb4cec6 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; + /* + * Check for invalid size + */ + if ((hw->mac.type == e1000_82576) && (size > 15)) { + pr_notice("The NVM size is not valid, defaulting to 32K\n"); + size = 15; + } + nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { nvm->opcode_bits = 8; @@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) } else nvm->type = e1000_nvm_flash_hw; - /* - * Check for invalid size - */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); - size = 15; - } - /* NVM Function Pointers */ switch (hw->mac.type) { case e1000_82580: diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 10efcd88dca0..28394bea5253 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -156,8 +156,12 @@ : (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? 
(0x03828 + ((_n) * 0x100)) \ : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ : (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index a19c84cad0e9..70591117051b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } @@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, wr32(reg, (_test[pat] & write)); val = rd32(reg) & mask; if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (_test[pat] & write & mask)); *data = reg; return 1; @@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, wr32(reg, write & mask); val = rd32(reg); if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" - " got 0x%08X expected 0x%08X\n", reg, + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; @@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_STATUS, toggle); after = rd32(E1000_STATUS) & toggle; if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " - "got: 0x%08X expected: 0x%08X\n", after, value); + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) break; } + /* add small delay to avoid loopback test failure */ + msleep(50); + /* force 1000, set loopback */ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); @@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) * sessions are active */ if (igb_check_reset_block(&adapter->hw)) { dev_err(&adapter->pdev->dev, - "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); + "Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; } if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i210)) { + || (adapter->hw.mac.type == e1000_i211)) { dev_err(&adapter->pdev->dev, - "Loopback test not supported " - "on this part at this time.\n"); + "Loopback test not supported on this part at this time.\n"); *data = 0; goto out; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c 
b/drivers/net/ethernet/intel/igb/igb_main.c index b7c2d5050572..48cc4fb1a307 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter) (u64)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), + 16, 1, buffer_info->skb->data, buffer_info->length, true); } } @@ -547,18 +547,17 @@ rx_ring_summary: (u64)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) { + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->skb) { print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - IGB_RX_HDR_LEN, true); + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + IGB_RX_HDR_LEN, true); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), + page_address(buffer_info->page) + + buffer_info->page_offset, PAGE_SIZE/2, true); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 50fc137501da..18bf08c9d7a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; + } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f32e70300770..5aba5ecdf1e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* If source MAC is equal to our own MAC and not performing * the selftest or flb disabled - drop the packet */ if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) + !((dev->features & NETIF_F_LOOPBACK) || + priv->validate_loopback)) goto next; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 019d856b1334..10bba09c44ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; - ring->blocked = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); @@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ring->cons += txbbs_skipped; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* Wakeup Tx queue if this ring stopped it */ - if (unlikely(ring->blocked)) { - if ((u32) (ring->prod - 
ring->cons) <= - ring->size - HEADROOM - MAX_DESC_TXBBS) { - ring->blocked = 0; - netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; - } + /* + * Wakeup Tx queue if this stopped, and at least 1 packet + * was completed + */ + if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; } } @@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - ring->blocked = 1; priv->port_stats.queue_stopped++; return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 88b7b3e75ab1..daf417923661 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -358,13 +358,14 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, } int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent) { int obj_per_chunk; int num_icm; unsigned chunk_size; int i; + u64 size; obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; @@ -380,10 +381,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table->coherent = use_coherent; mutex_init(&table->mutex); + size = (u64) nobj * obj_size; for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { chunk_size = MLX4_TABLE_CHUNK_SIZE; - if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) - chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) + chunk_size = PAGE_ALIGN(size - + i * MLX4_TABLE_CHUNK_SIZE); table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index 19e4efc0b342..a67744f53506 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -78,7 +78,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, int start, int end); int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 48d0e90194cb..827b72dfce99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev, "on this HCA, aborting.\n"); return -EINVAL; } - if (port_type[i] == MLX4_PORT_TYPE_ETH && - port_type[i + 1] == MLX4_PORT_TYPE_IB) - return -EINVAL; } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4ec3835e1bc2..a018ea2a43de 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { /* Entry already exists, add to duplicates */ dqp = kmalloc(sizeof *dqp, GFP_KERNEL); - if (!dqp) + if (!dqp) { + err = -ENOMEM; goto out_mailbox; + } dqp->qpn = qpn; list_add_tail(&dqp->list, &entry->duplicates); found = true; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 59ebc0339638..4d9df8f2a126 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -249,7 +249,7 @@ struct mlx4_bitmap { struct mlx4_buddy { unsigned long **bits; unsigned int *num_free; - int max_order; + u32 max_order; spinlock_t lock; }; @@ -258,7 +258,7 @@ struct mlx4_icm; struct mlx4_icm_table { u64 virt; int num_icm; - int num_obj; + u32 num_obj; int obj_size; int lowmem; int coherent; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5f1ab105debc..9d27e42264e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -248,7 +248,6 @@ struct mlx4_en_tx_ring { u32 doorbell_qpn; void *buf; u16 poll_cnt; - int blocked; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; u32 last_nr_txbb; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index af55b7ce5341..c202d3ad2a0e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -37,6 +37,7 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/kernel.h> +#include <linux/vmalloc.h> #include <linux/mlx4/cmd.h> @@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), GFP_KERNEL); buddy->num_free = 
kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); @@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); - if (!buddy->bits[i]) - goto err_out_free; - bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); + if (!buddy->bits[i]) + goto err_out_free; + } } set_bit(0, buddy->bits[buddy->max_order]); @@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); @@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, - ilog2(dev->caps.num_mtts / + ilog2((u32)dev->caps.num_mtts / (1 << log_mtts_per_seg))); if (err) goto err_buddy; @@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)); if (priv->reserved_mtts < 0) { - mlx4_warn(dev, "MTT table of order %d is too small.\n", + mlx4_warn(dev, "MTT table of order %u is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; goto err_reserve_mtts; diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 9ee4725363d5..8e0c3cc2a1ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, u64 size; u64 start; int type; - int num; + u32 num; int log_num; }; @@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, si_meminfo(&si); request->num_mtt = roundup_pow_of_two(max_t(unsigned, request->num_mtt, - min(1UL << 31, + min(1UL << (31 - log_mtts_per_seg), si.totalram >> (log_mtts_per_seg - 1)))); profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c index 802498293528..34ee09bae36e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev, } /* - * Adjust port configuration: - * If port 1 sensed nothing and port 2 is IB, set both as IB - * If port 2 sensed nothing and port 1 is Eth, set both as Eth - */ - if (stype[0] == MLX4_PORT_TYPE_ETH) { - for (i = 1; i < dev->caps.num_ports; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; - } - if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { - for (i = 0; i < dev->caps.num_ports - 1; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; - } - - /* * If sensed nothing, remain in current configuration. 
*/ for (i = 0; i < dev->caps.num_ports; i++) diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 4069edab229e..53743f7a2ca9 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev) "phy-mode", NULL); if (mode && !strcmp(mode, "mii")) return PHY_INTERFACE_MODE_MII; - return PHY_INTERFACE_MODE_RMII; } - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT - return PHY_INTERFACE_MODE_MII; -#else return PHY_INTERFACE_MODE_RMII; -#endif } static bool use_iram_for_net(struct device *dev) { if (dev && dev->of_node) return of_property_read_bool(dev->of_node, "use-iram"); - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET - return true; -#else return false; -#endif } /* Receive Status information word */ diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 46df3a04030c..24c2305d7948 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -8,7 +8,7 @@ config SH_ETH (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757 || ARCH_R8A7740) + CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779) select CRC32 select NET_CORE select MII @@ -18,4 +18,4 @@ config SH_ETH Renesas SuperH Ethernet device driver. This driver supporting CPUs are: - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - and R8A7740. + R8A7740 and R8A7779. diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index af0b867a6cf6..bad8f2eec9b4 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev) #endif /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) +#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { @@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev) static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned int bits = ECMR_RTM; + +#if defined(CONFIG_ARCH_R8A7779) + bits |= ECMR_ELB; +#endif switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); break; default: break; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 70554a1b2b02..65a8d49106a4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_filters(efx); @@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/efx.h 
b/drivers/net/ethernet/sfc/efx.h index be8f9158a714..70755c97251a 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -30,6 +30,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. + */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 10536f93b561..8cba2df82b18 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 9b225a7769f7..18713436b443 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket 
buffer to a TX queue * diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd8882f9602a..c136162e6473 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2077,7 +2077,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, goto error_netdev_register; } - priv->stmmac_clk = clk_get(priv->device, NULL); + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { pr_warning("%s: warning: cannot get CSR clock\n", __func__); goto error_clk_get; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cd01ee7ecef1..b93245c11995 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. */ -static int stmmac_pltfr_probe(struct platform_device *pdev) +static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 3b5c4571b55e..d15c888e9df8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_destroy(struct cpdma_chan *chan) { - struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_ctlr *ctlr; unsigned long flags; if (!chan) return -EINVAL; + ctlr = chan->ctlr; spin_lock_irqsave(&ctlr->lock, flags); if (chan->state != CPDMA_STATE_IDLE) diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 482648fcf0b6..98934bdf6acf 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev) } int ixp46x_phc_index = -1; +EXPORT_SYMBOL_GPL(ixp46x_phc_index); static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6cee2917eb02..4a1a5f58fa73 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device) unsigned long flags; net_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); - net_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); - - /* Wait for all send completions */ - wait_event(net_device->wait_drain, - atomic_read(&net_device->num_outstanding_sends) == 0); netvsc_disconnect_vsp(net_device); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e5d6146937fa..1e88a1095934 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; + struct netvsc_device *nvdev = dev->net_dev; + struct hv_device *hdev = nvdev->dev; + ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = 
RNDIS_DEV_UNINITIALIZED; cleanup: + spin_lock_irqsave(&hdev->channel->inbound_lock, flags); + nvdev->destroy = true; + spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Wait for all send completions */ + wait_event(nvdev->wait_drain, + atomic_read(&nvdev->num_outstanding_sends) == 0); + if (request) put_rndis_request(dev, request); return; diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index a561ae44a9ac..c6a0299aa9f9 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) /* If not add the 'RPOLC', we can't catch the receive interrupt. * It's related with the HW layout and the IR transiver. */ - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); return ret; } @@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev bfin_sir_stop_rx(port); val = UART_GET_GCTL(port); - val &= ~(UCEN | IREN | RPOLC); + val &= ~(UCEN | UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA @@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work) * reset all the UART. */ val = UART_GET_GCTL(port); - val &= ~(IREN | RPOLC); + val &= ~(UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); SSYNC(); - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index 824e2a93fe8a..5f3aeac3f86d 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c @@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 5a278ab83c2f..2d4b6a1ab202 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c @@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0737bd4d1669..0f0f9ce3a776 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q) int i; for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { - if (rcu_dereference(vlan->taps[i]) == q) + if (rcu_dereference_protected(vlan->taps[i], + lockdep_is_held(&macvtap_lock)) == q) return i; } diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index f9347ea3d381..b3321129a83c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this, * rtnl_lock already held */ if (nt->np.dev) { - spin_unlock_irqrestore( - &target_list_lock, - flags); __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, - flags); dev_put(nt->np.dev); nt->np.dev = NULL; - netconsole_target_put(nt); } nt->enabled = 0; stopped = true; diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index e0cc4ef33dee..eefe49e8713c 100644 --- 
a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -101,7 +101,6 @@ err: n--; gpio_free(s->gpio[n]); } - devm_kfree(&pdev->dev, s); return r; } diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 5c120189ec86..4d4d25efc1e1 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev, pb->mii_bus = parent_bus; ret_val = -ENODEV; - for_each_child_of_node(dev->of_node, child_bus_node) { + for_each_available_child_of_node(dev->of_node, child_bus_node) { u32 v; r = of_property_read_u32(child_bus_node, "reg", &v); diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1c98321b56cc..162464fe86bf 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - rt = ip_route_output_ports(&init_net, &fl4, NULL, + rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - rt = ip_route_output_ports(&init_net, &fl4, sk, + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 87707ab39430..341b65dbbcd3 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -795,16 +795,17 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { struct netpoll *np; int err; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), gfp); if (!np) return -ENOMEM; - err = __netpoll_setup(np, port->dev); + err = __netpoll_setup(np, port->dev, gfp); if (err) { kfree(np); return err; @@ -833,7 +834,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team) } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { return 0; } @@ -913,7 +915,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } if (team_netpoll_info(team)) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, GFP_KERNEL); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1443,7 +1445,7 @@ static void team_netpoll_cleanup(struct net_device *dev) } static int team_netpoll_setup(struct net_device *dev, - struct netpoll_info *npifo) + struct netpoll_info *npifo, gfp_t gfp) { struct team *team = netdev_priv(dev); struct team_port *port; @@ -1451,7 +1453,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, gfp); if (err) { __team_netpoll_cleanup(team); break; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 926d4db5cb38..3a16d4fdaa05 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun) netif_tx_lock_bh(tun->dev); 
netif_carrier_off(tun->dev); tun->tfile = NULL; - tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); /* Drop read queue */ diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 64610048ce87..7d78669000d7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev) struct urb *req = usb_alloc_urb(0, GFP_KERNEL); if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + usb_free_urb(req); usbpn_close(dev); return -ENOMEM; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f4ce5957df32..4cd582a4f625 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Dell branded MBM devices like DW5550 */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x413c, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + + /* Toshiba branded MBM devices */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x0930, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 2ea126a16d79..328397c66730 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -247,30 +247,12 @@ err: */ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) { - int rv; struct qmi_wwan_state *info = (void *)&dev->data; - /* ZTE makes devices where the interface descriptors and endpoint - * configurations of two or more interfaces are identical, even - * though the functions are completely different. 
If set, then - * driver_info->data is a bitmap of acceptable interface numbers - * allowing us to bind to one such interface without binding to - * all of them - */ - if (dev->driver_info->data && - !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) { - dev_info(&intf->dev, "not on our whitelist - ignored"); - rv = -ENODEV; - goto err; - } - /* control and data is shared */ info->control = intf; info->data = intf; - rv = qmi_wwan_register_subdriver(dev); - -err: - return rv; + return qmi_wwan_register_subdriver(dev); } static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -356,214 +338,64 @@ static const struct driver_info qmi_wwan_shared = { .manage_power = qmi_wwan_manage_power, }; -static const struct driver_info qmi_wwan_force_int0 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(0), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int1 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(1), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int2 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(2), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int3 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(3), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int4 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(4), /* interface whitelist bitmap */ -}; - -/* Sierra Wireless provide equally useless interface descriptors - * Devices in QMI mode can be switched between two different - * configurations: - * a) USB interface #8 is QMI/wwan - * b) USB interfaces #8, #19 and #20 are QMI/wwan - * - * Both configurations provide a number of other interfaces (serial++), - * some of which have the same endpoint configuration as we expect, so - * a whitelist or blacklist is necessary. - * - * FIXME: The below whitelist should include BIT(20). It does not - * because I cannot get it to work... 
- */ -static const struct driver_info qmi_wwan_sierra = { - .description = "Sierra Wireless wwan/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ -}; - #define HUAWEI_VENDOR_ID 0x12D1 +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_shared + /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int3 + QMI_FIXED_INTF(vend, prod, 3) -/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ +/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */ #define QMI_GOBI_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int0 + QMI_FIXED_INTF(vend, prod, 0) static const struct usb_device_id products[] = { + /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9), .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Huawei E392, E398 and possibly others in "Windows mode" - * using a combined control and data interface without any CDC - * functional descriptors - */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 17, + + /* 2. 
Combined interface devices matching on class+protocol */ + { /* Huawei E392, E398 and possibly others in "Windows mode" */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), .driver_info = (unsigned long)&qmi_wwan_shared, }, { /* Pantech UML290 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x106c, - .idProduct = 0x3718, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xf0, - .bInterfaceProtocol = 0xff, + USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), .driver_info = (unsigned long)&qmi_wwan_shared, }, - { /* ZTE MF820D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0167, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF821D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0326, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3520-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0055, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int1, - }, - { /* ZTE (Vodafone) K3565-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0063, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3570-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1008, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3571-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1010, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3765-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x2002, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K4505-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0104, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF60 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1402, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int2, - }, - { /* Sierra Wireless MC77xx in QMI mode */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x1199, - .idProduct = 0x68a2, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = 
(unsigned long)&qmi_wwan_sierra, + { /* Pantech UML290 - newer firmware */ + USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), + .driver_info = (unsigned long)&qmi_wwan_shared, }, - /* Gobi 1000 devices */ + /* 3. Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ + {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ + + /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ @@ -579,7 +411,7 @@ static const struct usb_device_id products[] = { {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ - /* Gobi 2000 and 3000 devices */ + /* 5. Gobi 2000 and 3000 devices */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ @@ -589,6 +421,8 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ + {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ + {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ @@ -600,11 +434,14 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ + {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? 
*/ {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ + {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ + { } /* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index d75d1f56becf..7be49ea60b6d 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -68,15 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0); */ #define SIERRA_NET_USBCTL_BUF_LEN 1024 -/* list of interface numbers - used for constructing interface lists */ -struct sierra_net_iface_info { - const u32 infolen; /* number of interface numbers on list */ - const u8 *ifaceinfo; /* pointer to the array holding the numbers */ -}; - struct sierra_net_info_data { u16 rx_urb_size; - struct sierra_net_iface_info whitelist; }; /* Private data structure */ @@ -637,21 +630,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu) return usbnet_change_mtu(net, new_mtu); } -static int is_whitelisted(const u8 ifnum, - const struct sierra_net_iface_info *whitelist) -{ - if (whitelist) { - const u8 *list = whitelist->ifaceinfo; - int i; - - for (i = 0; i < whitelist->infolen; i++) { - if (list[i] == ifnum) - return 1; - } - } - return 0; -} - static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) { int result = 0; @@ -706,11 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; - /* We only accept certain interfaces */ - if (!is_whitelisted(ifacenum, &data->whitelist)) { - dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); - return -ENODEV; - } numendpoints = intf->cur_altsetting->desc.bNumEndpoints; /* We have three endpoints, bulk in and out, and a status */ if (numendpoints != 3) { @@ -945,13 +918,8 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } -static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { .rx_urb_size = 8 * 1024, - .whitelist = { - .infolen = ARRAY_SIZE(sierra_net_ifnum_list), - .ifaceinfo = sierra_net_ifnum_list - } }; static const struct driver_info sierra_net_info_direct_ip = { @@ -965,15 +933,19 @@ static const struct driver_info sierra_net_info_direct_ip = { .data = (unsigned long)&sierra_net_info_data_direct_ip, }; +#define DIRECT_IP_DEVICE(vend, prod) \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip} + static const struct usb_device_id products[] = { - {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ - .driver_info = (unsigned long) 
&sierra_net_info_direct_ip}, - {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, + DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ + DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ + DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ + DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ {}, /* last item */ }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 93e0cfb739b8..ce9d4f2c9776 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, netdev->watchdog_timeo = 5 * HZ; INIT_WORK(&adapter->work, vmxnet3_reset_work); + set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); if (adapter->intr.type == VMXNET3_IT_MSIX) { int i; @@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_register; } - set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); vmxnet3_check_link(adapter, false); atomic_inc(&devices_found); return 0; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 9eb6479306d6..ef36cafd44b7 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev, } /* Global interrupt queue */ writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); + + rc = -ENOMEM; + priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev, IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma); if (!priv->iqcfg) goto err_free_irq_5; writel(priv->iqcfg_dma, ioaddr + IQCFG); - rc = -ENOMEM; - /* * SCC 0-3 private rx/tx irq structures * IQRX/TXi needs to be set soon. Learned it the hard way... 
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index 283237f6f074..def12b38cbf7 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options) unsigned barker; options_orig = kstrdup(_options, GFP_KERNEL); - if (options_orig == NULL) + if (options_orig == NULL) { + result = -ENOMEM; goto error_parse; + } options = options_orig; while ((token = strsep(&options, ",")) != NULL) { diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index efc162e0b511..88b8d64c90f1 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -342,7 +342,7 @@ static int at76_dfu_get_status(struct usb_device *udev, return ret; } -static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state) +static int at76_dfu_get_state(struct usb_device *udev, u8 *state) { int ret; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 8c4c040a47b8..2aab20ee9f38 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2056,9 +2056,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) void ath5k_beacon_config(struct ath5k_hw *ah) { - unsigned long flags; - - spin_lock_irqsave(&ah->block, flags); + spin_lock_bh(&ah->block); ah->bmisscount = 0; ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); @@ -2085,7 +2083,7 @@ ath5k_beacon_config(struct ath5k_hw *ah) ath5k_hw_set_imr(ah, ah->imask); mmiowb(); - spin_unlock_irqrestore(&ah->block, flags); + spin_unlock_bh(&ah->block); } static void ath5k_tasklet_beacon(unsigned long data) diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 260e7dc7f751..d56453e43d7e 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -254,7 +254,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ath5k_vif *avf = (void *)vif->drv_priv; struct ath5k_hw *ah = hw->priv; struct ath_common *common = ath5k_hw_common(ah); - unsigned long flags; mutex_lock(&ah->lock); @@ -300,9 +299,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } if (changes & BSS_CHANGED_BEACON) { - spin_lock_irqsave(&ah->block, flags); + spin_lock_bh(&ah->block); ath5k_beacon_update(hw, vif); - spin_unlock_irqrestore(&ah->block, flags); + spin_unlock_bh(&ah->block); } if (changes & BSS_CHANGED_BEACON_ENABLED) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index cfa91ab7acf8..60b6a9daff7e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah) case AR9300_DEVID_QCA955X: case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: + case AR9485_DEVID_AR1111: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index dd0c146d81dc..ce7332c64efb 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -49,6 +49,7 @@ #define AR9300_DEVID_AR9462 0x0034 #define AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 +#define AR9485_DEVID_AR1111 0x0037 #define AR5416_AR9100_DEVID 0x000b diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 7990cd55599c..b42be910a83d 100644 --- 
a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -773,15 +773,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_intrpend); -void ath9k_hw_disable_interrupts(struct ath_hw *ah) +void ath9k_hw_kill_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - if (!(ah->imask & ATH9K_INT_GLOBAL)) - atomic_set(&ah->intr_ref_cnt, -1); - else - atomic_dec(&ah->intr_ref_cnt); - ath_dbg(common, INTERRUPT, "disable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); @@ -793,6 +788,17 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); } } +EXPORT_SYMBOL(ath9k_hw_kill_interrupts); + +void ath9k_hw_disable_interrupts(struct ath_hw *ah) +{ + if (!(ah->imask & ATH9K_INT_GLOBAL)) + atomic_set(&ah->intr_ref_cnt, -1); + else + atomic_dec(&ah->intr_ref_cnt); + + ath9k_hw_kill_interrupts(ah); +} EXPORT_SYMBOL(ath9k_hw_disable_interrupts); void ath9k_hw_enable_interrupts(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 0eba36dca6f8..4a745e68dd94 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -738,6 +738,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah); void ath9k_hw_set_interrupts(struct ath_hw *ah); void ath9k_hw_enable_interrupts(struct ath_hw *ah); void ath9k_hw_disable_interrupts(struct ath_hw *ah); +void ath9k_hw_kill_interrupts(struct ath_hw *ah); void ar9002_hw_attach_mac_ops(struct ath_hw *ah); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6049d8b82855..a22df749b8db 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -462,8 +462,10 @@ irqreturn_t ath_isr(int irq, void *dev) if (!ath9k_hw_intrpend(ah)) return IRQ_NONE; - if(test_bit(SC_OP_HW_RESET, &sc->sc_flags)) + if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) { + ath9k_hw_kill_interrupts(ah); return IRQ_HANDLED; + } /* * Figure out the reason(s) for the interrupt. Note diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 87b89d55e637..a978984d78a5 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ { 0 } }; @@ -320,6 +321,7 @@ static int ath_pci_suspend(struct device *device) * Otherwise the chip never moved to full sleep, * when no interface is up. 
*/ + ath9k_stop_btcoex(sc); ath9k_hw_disable(sc->sc_ah); ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 12aca02228c2..4480c0cc655f 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1044,7 +1044,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; - bool decrypt_error = false; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); @@ -1066,6 +1065,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) tsf_lower = tsf & 0xffffffff; do { + bool decrypt_error = false; /* If handling rx interrupt and flush is in progress => exit */ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) break; diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index b80352b308d5..a140165dfee0 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev) if (dev->dev->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; + } else if (dev->dev->chip_id == 0x5354) { + /* Don't allow overtaking buttons GPIOs */ + set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */ } - if (dev->dev->chip_id == 0x5354) - set &= 0xff02; + if (0 /* FIXME: conditional unknown */ ) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0100); - mask |= 0x0180; - set |= 0x0180; + /* BT Coexistance Input */ + mask |= 0x0080; + set |= 0x0080; + /* BT Coexistance Out */ + mask |= 0x0100; + set |= 0x0100; } if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { + /* PA is controlled by gpio 9, let ucode handle it */ b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } - if (dev->dev->core_rev >= 2) - mask |= 0x0010; /* FIXME: This is redundant. 
*/ switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, - BCMA_CC_GPIOCTL) & mask) | set); + BCMA_CC_GPIOCTL) & ~mask) | set); break; #endif #ifdef CONFIG_B43_SSB @@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev) if (gpiodev) ssb_write32(gpiodev, B43_GPIO_CONTROL, (ssb_read32(gpiodev, B43_GPIO_CONTROL) - & mask) | set); + & ~mask) | set); break; #endif } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 57bf1d7ee80f..9ab24528f9b9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -1188,7 +1188,7 @@ exit: kfree(buf); /* close file before return */ if (fp) - filp_close(fp, current->files); + filp_close(fp, NULL); /* restore previous address limit */ set_fs(old_fs); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 9a4c63f927cb..7ed7d7577024 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; - const struct ieee80211_reg_rule *reg_rule; struct txpwr_limits txpwr; - int ret; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); @@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, ); /* set or restore gmode as required by regulatory */ - ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, ®_rule); - if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM)) + if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 9e79d47e077f..192ad5c1fcc8 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = { IEEE80211_CHAN_NO_HT40PLUS), CHAN2GHZ(14, 2484, IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | - IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS | + IEEE80211_CHAN_NO_OFDM) }; static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 6fddd2785e6e..a82f46c10f5e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, */ static bool rs_use_green(struct ieee80211_sta *sta) { - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && - !(ctx->ht.non_gf_sta_present); + /* + * There's a bug somewhere in this code that causes the + * scaling to get stuck because GF+SGI can't be combined + * in SISO rates. Until we find that bug, disable GF, it + * has only limited benefit and we still interoperate with + * GF APs since we can always receive GF transmissions. 
+ */ + return false; } /** diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index eb5de800ed90..1c10b542ab23 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv, netif_tx_wake_all_queues(priv->dev); } + kfree(cmd); done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 76caebaa4397..e970897f6ab5 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func) kfree(packet); } + kfree(card); lbs_deb_leave(LBS_DEB_SDIO); } diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 55a77e41170a..27980778d992 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -10,6 +10,7 @@ #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/usb.h> +#include <linux/olpc-ec.h> #ifdef CONFIG_OLPC #include <asm/olpc.h> diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 58048189bd24..fe1ea43c5149 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -571,7 +571,10 @@ static int lbs_thread(void *data) netdev_info(dev, "Timeout submitting command 0x%04x\n", le16_to_cpu(cmdnode->cmdbuf->command)); lbs_complete_command(priv, cmdnode, -ETIMEDOUT); - if (priv->reset_card) + + /* Reset card, but only when it isn't in the process + * of being shutdown anyway. */ + if (!dev->dismantle && priv->reset_card) priv->reset_card(priv); } priv->cmd_timed_out = 0; diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 7f207b6e9552..effb044a8a9d 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. 
*/ -static struct usb_device_id p54u_table[] __devinitdata = { +static struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 241162e8111d..7a4ae9ee1c63 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -1803,6 +1803,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, struct cfg80211_pmksa *pmksa, int max_pmkids) { + struct ndis_80211_pmkid *new_pmkids; int i, err, newlen; unsigned int count; @@ -1833,11 +1834,12 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, /* add new pmkid */ newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]); - pmkids = krealloc(pmkids, newlen, GFP_KERNEL); - if (!pmkids) { + new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL); + if (!new_pmkids) { err = -ENOMEM; goto error; } + pmkids = new_pmkids; pmkids->length = cpu_to_le32(newlen); pmkids->bssid_info_count = cpu_to_le32(count + 1); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 88455b1b9fe0..cb8c2aca54e4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + int i, count; + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + if (rt2x00_get_field32(reg, WLAN_EN)) + return 0; + + rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); + rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); + rt2x00_set_field32(&reg, WLAN_EN, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + + udelay(REGISTER_BUSY_DELAY); + + count = 0; + do { + /* + * Check PLL_LD & XTAL_RDY.
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); + if (rt2x00_get_field32(reg, PLL_LD) && + rt2x00_get_field32(reg, XTAL_RDY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + if (i >= REGISTER_BUSY_COUNT) { + + if (count >= 10) + return -EIO; + + rt2800_register_write(rt2x00dev, 0x58, 0x018); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x418); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x618); + udelay(REGISTER_BUSY_DELAY); + count++; + } else { + count = 0; + } + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); + rt2x00_set_field32(&reg, WLAN_RESET, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2x00_set_field32(&reg, WLAN_RESET, 0); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); + } while (count != 0); + + return 0; +} + void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) @@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, { unsigned int i; u32 reg; + int retval; + + if (rt2x00_rt(rt2x00dev, RT3290)) { + retval = rt2800_enable_wlan_rt3290(rt2x00dev); + if (retval) + return -EBUSY; + } /* * If driver doesn't wake up firmware here, diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 235376e9cb04..98aa426a3564 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) -{ - u32 reg; - int i, count; - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - - rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); - rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); - rt2x00_set_field32(&reg, WLAN_EN, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - - udelay(REGISTER_BUSY_DELAY); - - count = 0; - do { - /* - * Check PLL_LD & XTAL_RDY.
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); - if (rt2x00_get_field32(reg, PLL_LD) && - rt2x00_get_field32(reg, XTAL_RDY)) - break; - udelay(REGISTER_BUSY_DELAY); - } - - if (i >= REGISTER_BUSY_COUNT) { - - if (count >= 10) - return -EIO; - - rt2800_register_write(rt2x00dev, 0x58, 0x018); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x418); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x618); - udelay(REGISTER_BUSY_DELAY); - count++; - } else { - count = 0; - } - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); - rt2x00_set_field32(&reg, WLAN_RESET, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2x00_set_field32(&reg, WLAN_RESET, 0); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); - } while (count != 0); - - return 0; -} static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; @@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* - * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan - * clk for rt3290. That avoid the MCU fail in start phase. - */ - if (rt2x00_rt(rt2x00dev, RT3290)) { - retval = rt2800_enable_wlan_rt3290(rt2x00dev); - - if (retval) - return retval; - } - - /* * This device has multiple filters for control frames * and has a separate filter for PS Poll frames. */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f32259686b45..3f7bc5cadf9a 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) { - struct ieee80211_conf conf = { .flags = 0 }; - struct rt2x00lib_conf libconf = { .conf = &conf }; + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf }; rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index 71a30b026089..533024095c43 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] __devinitdata = { +static struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ diff --git a/drivers/of/base.c b/drivers/of/base.c index c181b94abc36..d4a1c9a043e1 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -364,6 +364,33 @@ struct device_node *of_get_next_child(const struct device_node *node, EXPORT_SYMBOL(of_get_next_child); /** + * of_get_next_available_child - Find the next available child node + * @node: parent node + * @prev: previous child of the parent node, or NULL to get first + * + * This function is like of_get_next_child(), except that it + * automatically skips any disabled nodes (i.e. status = "disabled"). + */ +struct device_node *of_get_next_available_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + + read_lock(&devtree_lock); + next = prev ?
prev->sibling : node->child; + for (; next; next = next->sibling) { + if (!of_device_is_available(next)) + continue; + if (of_node_get(next)) + break; + } + of_node_put(prev); + read_unlock(&devtree_lock); + return next; +} +EXPORT_SYMBOL(of_get_next_available_child); + +/** * of_find_node_by_path - Find a node matching a full OF path * @path: The full path to match * diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index fbf7b26c7c8a..c5792d622dc4 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -266,8 +266,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) } if (!error) - dev_printk(KERN_INFO, &dev->dev, - "power state changed by ACPI to D%d\n", state); + dev_info(&dev->dev, "power state changed by ACPI to %s\n", + pci_power_name(state)); return error; } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 185be3703343..5270f1a99328 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -959,6 +959,13 @@ static int pci_pm_poweroff_noirq(struct device *dev) if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); + /* + * The reason for doing this here is the same as for the analogous code + * in pci_pm_suspend_noirq(). + */ + if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) + pci_write_config_word(pci_dev, PCI_COMMAND, 0); + return 0; } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index fb7f3bebdc69..dc5c126e398a 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -657,11 +657,7 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev) if (p != NULL) return ERR_PTR(-EBUSY); - p = create_pinctrl(dev); - if (IS_ERR(p)) - return p; - - return p; + return create_pinctrl(dev); } /** @@ -738,11 +734,8 @@ static struct pinctrl_state *pinctrl_lookup_state_locked(struct pinctrl *p, dev_dbg(p->dev, "using pinctrl dummy state (%s)\n", name); state = create_state(p, name); - if (IS_ERR(state)) - return state; - } else { - return ERR_PTR(-ENODEV); - } + } else + state = ERR_PTR(-ENODEV); } return state; diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c index 75d3eff94296..3674d877ed7c 100644 --- a/drivers/pinctrl/pinctrl-imx23.c +++ b/drivers/pinctrl/pinctrl-imx23.c @@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void) { return platform_driver_register(&imx23_pinctrl_driver); } -arch_initcall(imx23_pinctrl_init); +postcore_initcall(imx23_pinctrl_init); static void __exit imx23_pinctrl_exit(void) { diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c index b973026811a2..0f5b2122b1ba 100644 --- a/drivers/pinctrl/pinctrl-imx28.c +++ b/drivers/pinctrl/pinctrl-imx28.c @@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void) { return platform_driver_register(&imx28_pinctrl_driver); } -arch_initcall(imx28_pinctrl_init); +postcore_initcall(imx28_pinctrl_init); static void __exit imx28_pinctrl_exit(void) { diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c index 689b3c88dd2e..9fd02162a3c2 100644 --- a/drivers/pinctrl/pinctrl-imx51.c +++ b/drivers/pinctrl/pinctrl-imx51.c @@ -974,7 +974,7 @@ static struct imx_pin_reg imx51_pin_regs[] = { IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */ IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */ IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */ - IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 
0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ + IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */ IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */ diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c index 6f99769c6733..a39fb7a6fc51 100644 --- a/drivers/pinctrl/pinctrl-nomadik-db8500.c +++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c @@ -505,6 +505,8 @@ static const unsigned kp_b_1_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1, DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2, DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 }; +static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, + DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3}; static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3, DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6, DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8, @@ -662,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = { DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B), + DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B), @@ -751,7 +754,7 @@ DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1"); DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1"); DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1", "lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1"); -DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1"); +DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1"); DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1"); DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1"); DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1"); @@ -766,7 +769,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1", DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1"); DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1"); DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1"); -DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2"); +DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2"); DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1"); DB8500_FUNC_GROUPS(usb, "usb_a_1"); DB8500_FUNC_GROUPS(trig, "trig_b_1"); diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index 53b0d49a7a1c..3dde6537adb8 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c @@ -1292,7 +1292,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) NOMADIK_GPIO_TO_IRQ(pdata->first_gpio), 0, &nmk_gpio_irq_simple_ops, nmk_chip); if (!nmk_chip->domain) { - pr_err("%s: Failed to create irqdomain\n", np->full_name); + dev_err(&dev->dev, "failed to create irqdomain\n"); ret = -ENOSYS; goto out; } @@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) for (i = 0; i < npct->soc->gpio_num_ranges; i++) { if (!nmk_gpio_chips[i]) { dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); - devm_kfree(&pdev->dev, npct); return -EPROBE_DEFER; } npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip; 
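Note: the devm_kfree() call dropped from the nomadik probe path above (and the similar removals in the sirf and u300 probe paths in the hunks that follow) are redundant because these drivers allocate their private data with devm_kzalloc(); the driver core releases such managed allocations automatically when probe fails or the device is later unbound. A minimal sketch of the pattern these cleanups converge on is shown below; foo_priv and foo_clock_ready() are hypothetical stand-ins for illustration only, not symbols from these drivers.

	#include <linux/platform_device.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	struct foo_priv {
		int dummy;		/* hypothetical driver state */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;

		/* Managed allocation: lifetime is tied to pdev->dev */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* Hypothetical dependency check standing in for "GPIO chip
		 * not registered yet"; no devm_kfree() is needed on this
		 * error path, the core frees priv when probe fails. */
		if (!foo_clock_ready())
			return -EPROBE_DEFER;

		platform_set_drvdata(pdev, priv);
		return 0;
	}

Returning -EPROBE_DEFER asks the driver core to retry the probe later, once the missing dependency (in the nomadik case, the GPIO chips) has registered; keeping the error path free of manual cleanup is what makes that retry safe.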
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index 2aae8a8978e9..7fca6ce5952b 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c @@ -1217,7 +1217,6 @@ out_no_rsc_remap: iounmap(spmx->gpio_virtbase); out_no_gpio_remap: platform_set_drvdata(pdev, NULL); - devm_kfree(&pdev->dev, spmx); return ret; } diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c index a7ad8c112d91..309f5b9a70ec 100644 --- a/drivers/pinctrl/pinctrl-u300.c +++ b/drivers/pinctrl/pinctrl-u300.c @@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev) upmx->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENOENT; - goto out_no_resource; - } + if (!res) + return -ENOENT; upmx->phybase = res->start; upmx->physize = resource_size(res); @@ -1165,8 +1163,6 @@ out_no_remap: platform_set_drvdata(pdev, NULL); out_no_memregion: release_mem_region(upmx->phybase, upmx->physize); -out_no_resource: - devm_kfree(&pdev->dev, upmx); return ret; } diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 782953ae4c03..b17c16ce54ad 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_X86) += x86/ +obj-$(CONFIG_OLPC) += olpc/ diff --git a/drivers/platform/olpc/Makefile b/drivers/platform/olpc/Makefile new file mode 100644 index 000000000000..dc8b26bc7209 --- /dev/null +++ b/drivers/platform/olpc/Makefile @@ -0,0 +1,4 @@ +# +# OLPC XO platform-specific drivers +# +obj-$(CONFIG_OLPC) += olpc-ec.o diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c new file mode 100644 index 000000000000..0f9f8596b300 --- /dev/null +++ b/drivers/platform/olpc/olpc-ec.c @@ -0,0 +1,336 @@ +/* + * Generic driver for the OLPC Embedded Controller. + * + * Copyright (C) 2011-2012 One Laptop per Child Foundation. + * + * Licensed under the GPL v2 or later. + */ +#include <linux/completion.h> +#include <linux/debugfs.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/module.h> +#include <linux/list.h> +#include <linux/olpc-ec.h> +#include <asm/olpc.h> + +struct ec_cmd_desc { + u8 cmd; + u8 *inbuf, *outbuf; + size_t inlen, outlen; + + int err; + struct completion finished; + struct list_head node; + + void *priv; +}; + +struct olpc_ec_priv { + struct olpc_ec_driver *drv; + struct work_struct worker; + struct mutex cmd_lock; + + /* Pending EC commands */ + struct list_head cmd_q; + spinlock_t cmd_q_lock; + + struct dentry *dbgfs_dir; + + /* + * Running an EC command while suspending means we don't always finish + * the command before the machine suspends. This means that the EC + * is expecting the command protocol to finish, but we after a period + * of time (while the OS is asleep) the EC times out and restarts its + * idle loop. Meanwhile, the OS wakes up, thinks it's still in the + * middle of the command protocol, starts throwing random things at + * the EC... and everyone's uphappy. 
+ */ + bool suspended; +}; + +static struct olpc_ec_driver *ec_driver; +static struct olpc_ec_priv *ec_priv; +static void *ec_cb_arg; + +void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg) +{ + ec_driver = drv; + ec_cb_arg = arg; +} +EXPORT_SYMBOL_GPL(olpc_ec_driver_register); + +static void olpc_ec_worker(struct work_struct *w) +{ + struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker); + struct ec_cmd_desc *desc = NULL; + unsigned long flags; + + /* Grab the first pending command from the queue */ + spin_lock_irqsave(&ec->cmd_q_lock, flags); + if (!list_empty(&ec->cmd_q)) { + desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node); + list_del(&desc->node); + } + spin_unlock_irqrestore(&ec->cmd_q_lock, flags); + + /* Do we actually have anything to do? */ + if (!desc) + return; + + /* Protect the EC hw with a mutex; only run one cmd at a time */ + mutex_lock(&ec->cmd_lock); + desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen, + desc->outbuf, desc->outlen, ec_cb_arg); + mutex_unlock(&ec->cmd_lock); + + /* Finished, wake up olpc_ec_cmd() */ + complete(&desc->finished); + + /* Run the worker thread again in case there are more cmds pending */ + schedule_work(&ec->worker); +} + +/* + * Throw a cmd descripter onto the list. We now have SMP OLPC machines, so + * locking is pretty critical. + */ +static void queue_ec_descriptor(struct ec_cmd_desc *desc, + struct olpc_ec_priv *ec) +{ + unsigned long flags; + + INIT_LIST_HEAD(&desc->node); + + spin_lock_irqsave(&ec->cmd_q_lock, flags); + list_add_tail(&desc->node, &ec->cmd_q); + spin_unlock_irqrestore(&ec->cmd_q_lock, flags); + + schedule_work(&ec->worker); +} + +int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen) +{ + struct olpc_ec_priv *ec = ec_priv; + struct ec_cmd_desc desc; + + /* Ensure a driver and ec hook have been registered */ + if (WARN_ON(!ec_driver || !ec_driver->ec_cmd)) + return -ENODEV; + + if (!ec) + return -ENOMEM; + + /* Suspending in the middle of a command hoses things really badly */ + if (WARN_ON(ec->suspended)) + return -EBUSY; + + might_sleep(); + + desc.cmd = cmd; + desc.inbuf = inbuf; + desc.outbuf = outbuf; + desc.inlen = inlen; + desc.outlen = outlen; + desc.err = 0; + init_completion(&desc.finished); + + queue_ec_descriptor(&desc, ec); + + /* Timeouts must be handled in the platform-specific EC hook */ + wait_for_completion(&desc.finished); + + /* The worker thread dequeues the cmd; no need to do anything here */ + return desc.err; +} +EXPORT_SYMBOL_GPL(olpc_ec_cmd); + +#ifdef CONFIG_DEBUG_FS + +/* + * debugfs support for "generic commands", to allow sending + * arbitrary EC commands from userspace. 
+ */ + +#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */ +#define EC_MAX_CMD_REPLY (8) + +static DEFINE_MUTEX(ec_dbgfs_lock); +static unsigned char ec_dbgfs_resp[EC_MAX_CMD_REPLY]; +static unsigned int ec_dbgfs_resp_bytes; + +static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + int i, m; + unsigned char ec_cmd[EC_MAX_CMD_ARGS]; + unsigned int ec_cmd_int[EC_MAX_CMD_ARGS]; + char cmdbuf[64]; + int ec_cmd_bytes; + + mutex_lock(&ec_dbgfs_lock); + + size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size); + + m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0], + &ec_dbgfs_resp_bytes, &ec_cmd_int[1], &ec_cmd_int[2], + &ec_cmd_int[3], &ec_cmd_int[4], &ec_cmd_int[5]); + if (m < 2 || ec_dbgfs_resp_bytes > EC_MAX_CMD_REPLY) { + /* reset to prevent overflow on read */ + ec_dbgfs_resp_bytes = 0; + + pr_debug("olpc-ec: bad ec cmd: cmd:response-count [arg1 [arg2 ...]]\n"); + size = -EINVAL; + goto out; + } + + /* convert scanf'd ints to char */ + ec_cmd_bytes = m - 2; + for (i = 0; i <= ec_cmd_bytes; i++) + ec_cmd[i] = ec_cmd_int[i]; + + pr_debug("olpc-ec: debugfs cmd 0x%02x with %d args %02x %02x %02x %02x %02x, want %d returns\n", + ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], + ec_cmd[3], ec_cmd[4], ec_cmd[5], ec_dbgfs_resp_bytes); + + olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1], + ec_cmd_bytes, ec_dbgfs_resp, ec_dbgfs_resp_bytes); + + pr_debug("olpc-ec: response %02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n", + ec_dbgfs_resp[0], ec_dbgfs_resp[1], ec_dbgfs_resp[2], + ec_dbgfs_resp[3], ec_dbgfs_resp[4], ec_dbgfs_resp[5], + ec_dbgfs_resp[6], ec_dbgfs_resp[7], + ec_dbgfs_resp_bytes); + +out: + mutex_unlock(&ec_dbgfs_lock); + return size; +} + +static ssize_t ec_dbgfs_cmd_read(struct file *file, char __user *buf, + size_t size, loff_t *ppos) +{ + unsigned int i, r; + char *rp; + char respbuf[64]; + + mutex_lock(&ec_dbgfs_lock); + rp = respbuf; + rp += sprintf(rp, "%02x", ec_dbgfs_resp[0]); + for (i = 1; i < ec_dbgfs_resp_bytes; i++) + rp += sprintf(rp, ", %02x", ec_dbgfs_resp[i]); + mutex_unlock(&ec_dbgfs_lock); + rp += sprintf(rp, "\n"); + + r = rp - respbuf; + return simple_read_from_buffer(buf, size, ppos, respbuf, r); +} + +static const struct file_operations ec_dbgfs_ops = { + .write = ec_dbgfs_cmd_write, + .read = ec_dbgfs_cmd_read, +}; + +static struct dentry *olpc_ec_setup_debugfs(void) +{ + struct dentry *dbgfs_dir; + + dbgfs_dir = debugfs_create_dir("olpc-ec", NULL); + if (IS_ERR_OR_NULL(dbgfs_dir)) + return NULL; + + debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops); + + return dbgfs_dir; +} + +#else + +static struct dentry *olpc_ec_setup_debugfs(void) +{ + return NULL; +} + +#endif /* CONFIG_DEBUG_FS */ + +static int olpc_ec_probe(struct platform_device *pdev) +{ + struct olpc_ec_priv *ec; + int err; + + if (!ec_driver) + return -ENODEV; + + ec = kzalloc(sizeof(*ec), GFP_KERNEL); + if (!ec) + return -ENOMEM; + + ec->drv = ec_driver; + INIT_WORK(&ec->worker, olpc_ec_worker); + mutex_init(&ec->cmd_lock); + + INIT_LIST_HEAD(&ec->cmd_q); + spin_lock_init(&ec->cmd_q_lock); + + ec_priv = ec; + platform_set_drvdata(pdev, ec); + + err = ec_driver->probe ? 
ec_driver->probe(pdev) : 0; + if (err) { + ec_priv = NULL; + kfree(ec); + } else { + ec->dbgfs_dir = olpc_ec_setup_debugfs(); + } + + return err; +} + +static int olpc_ec_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct olpc_ec_priv *ec = platform_get_drvdata(pdev); + int err = 0; + + if (ec_driver->suspend) + err = ec_driver->suspend(pdev); + if (!err) + ec->suspended = true; + + return err; +} + +static int olpc_ec_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct olpc_ec_priv *ec = platform_get_drvdata(pdev); + + ec->suspended = false; + return ec_driver->resume ? ec_driver->resume(pdev) : 0; +} + +static const struct dev_pm_ops olpc_ec_pm_ops = { + .suspend_late = olpc_ec_suspend, + .resume_early = olpc_ec_resume, +}; + +static struct platform_driver olpc_ec_plat_driver = { + .probe = olpc_ec_probe, + .driver = { + .name = "olpc-ec", + .pm = &olpc_ec_pm_ops, + }, +}; + +static int __init olpc_ec_init_module(void) +{ + return platform_driver_register(&olpc_ec_plat_driver); +} + +module_init(olpc_ec_init_module); + +MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 2a262f5c5c0c..c86bae828c28 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -289,6 +289,7 @@ config IDEAPAD_LAPTOP tristate "Lenovo IdeaPad Laptop Extras" depends on ACPI depends on RFKILL && INPUT + depends on SERIO_I8042 select INPUT_SPARSEKMAP help This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. @@ -758,8 +759,11 @@ config SAMSUNG_Q10 config APPLE_GMUX tristate "Apple Gmux Driver" + depends on ACPI depends on PNP - select BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE + depends on ACPI_VIDEO=n || ACPI_VIDEO ---help--- This driver provides support for the gmux device found on many Apple laptops, which controls the display mux for the hybrid diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 905fa01ac8df..dfb1a92ce949 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -2,6 +2,7 @@ * Gmux driver for Apple laptops * * Copyright (C) Canonical Ltd. <seth.forshee@canonical.com> + * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,16 +19,30 @@ #include <linux/pnp.h> #include <linux/apple_bl.h> #include <linux/slab.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/vga_switcheroo.h> #include <acpi/video.h> #include <asm/io.h> struct apple_gmux_data { unsigned long iostart; unsigned long iolen; + bool indexed; + struct mutex index_lock; struct backlight_device *bdev; + + /* switcheroo data */ + acpi_handle dhandle; + int gpe; + enum vga_switcheroo_client_id resume_client_id; + enum vga_switcheroo_state power_state; + struct completion powerchange_done; }; +static struct apple_gmux_data *apple_gmux_data; + /* * gmux port offsets. Many of these are not yet used, but may be in the * future, and it's useful to have them documented here anyhow. 
@@ -45,6 +60,9 @@ struct apple_gmux_data { #define GMUX_PORT_DISCRETE_POWER 0x50 #define GMUX_PORT_MAX_BRIGHTNESS 0x70 #define GMUX_PORT_BRIGHTNESS 0x74 +#define GMUX_PORT_VALUE 0xc2 +#define GMUX_PORT_READ 0xd0 +#define GMUX_PORT_WRITE 0xd4 #define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) @@ -59,22 +77,172 @@ struct apple_gmux_data { #define GMUX_BRIGHTNESS_MASK 0x00ffffff #define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK -static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port) { return inb(gmux_data->iostart + port); } -static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port, +static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port, u8 val) { outb(val, gmux_data->iostart + port); } -static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port) { return inl(gmux_data->iostart + port); } +static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + int i; + u8 tmpval; + + for (i = 0; i < 4; i++) { + tmpval = (val >> (i * 8)) & 0xff; + outb(tmpval, port + i); + } +} + +static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data) +{ + int i = 200; + u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + + while (i && (gwr & 0x01)) { + inb(gmux_data->iostart + GMUX_PORT_READ); + gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + udelay(100); + i--; + } + + return !!i; +} + +static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data) +{ + int i = 200; + u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + + while (i && !(gwr & 0x01)) { + gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + udelay(100); + i--; + } + + if (gwr & 0x01) + inb(gmux_data->iostart + GMUX_PORT_READ); + + return !!i; +} + +static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port) +{ + u8 val; + + mutex_lock(&gmux_data->index_lock); + outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); + gmux_index_wait_ready(gmux_data); + val = inb(gmux_data->iostart + GMUX_PORT_VALUE); + mutex_unlock(&gmux_data->index_lock); + + return val; +} + +static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port, + u8 val) +{ + mutex_lock(&gmux_data->index_lock); + outb(val, gmux_data->iostart + GMUX_PORT_VALUE); + gmux_index_wait_ready(gmux_data); + outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); + gmux_index_wait_complete(gmux_data); + mutex_unlock(&gmux_data->index_lock); +} + +static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port) +{ + u32 val; + + mutex_lock(&gmux_data->index_lock); + outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); + gmux_index_wait_ready(gmux_data); + val = inl(gmux_data->iostart + GMUX_PORT_VALUE); + mutex_unlock(&gmux_data->index_lock); + + return val; +} + +static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + int i; + u8 tmpval; + + mutex_lock(&gmux_data->index_lock); + + for (i = 0; i < 4; i++) { + tmpval = (val >> (i * 8)) & 0xff; + outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i); + } + + gmux_index_wait_ready(gmux_data); + outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); + gmux_index_wait_complete(gmux_data); + mutex_unlock(&gmux_data->index_lock); +} + +static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +{ + if (gmux_data->indexed) + return gmux_index_read8(gmux_data, port); + else + return gmux_pio_read8(gmux_data, port); 
+} + +static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val) +{ + if (gmux_data->indexed) + gmux_index_write8(gmux_data, port, val); + else + gmux_pio_write8(gmux_data, port, val); +} + +static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +{ + if (gmux_data->indexed) + return gmux_index_read32(gmux_data, port); + else + return gmux_pio_read32(gmux_data, port); +} + +static void gmux_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + if (gmux_data->indexed) + gmux_index_write32(gmux_data, port, val); + else + gmux_pio_write32(gmux_data, port, val); +} + +static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) +{ + u16 val; + + outb(0xaa, gmux_data->iostart + 0xcc); + outb(0x55, gmux_data->iostart + 0xcd); + outb(0x00, gmux_data->iostart + 0xce); + + val = inb(gmux_data->iostart + 0xcc) | + (inb(gmux_data->iostart + 0xcd) << 8); + + if (val == 0x55aa) + return true; + + return false; +} + static int gmux_get_brightness(struct backlight_device *bd) { struct apple_gmux_data *gmux_data = bl_get_data(bd); @@ -90,16 +258,7 @@ static int gmux_update_status(struct backlight_device *bd) if (bd->props.state & BL_CORE_SUSPENDED) return 0; - /* - * Older gmux versions require writing out lower bytes first then - * setting the upper byte to 0 to flush the values. Newer versions - * accept a single u32 write, but the old method also works, so we - * just use the old method for all gmux versions. - */ - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0); + gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); return 0; } @@ -110,6 +269,146 @@ static const struct backlight_ops gmux_bl_ops = { .update_status = gmux_update_status, }; +static int gmux_switchto(enum vga_switcheroo_client_id id) +{ + if (id == VGA_SWITCHEROO_IGD) { + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); + } else { + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); + } + + return 0; +} + +static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, + enum vga_switcheroo_state state) +{ + INIT_COMPLETION(gmux_data->powerchange_done); + + if (state == VGA_SWITCHEROO_ON) { + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3); + pr_debug("Discrete card powered up\n"); + } else { + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0); + pr_debug("Discrete card powered down\n"); + } + + gmux_data->power_state = state; + + if (gmux_data->gpe >= 0 && + !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done, + msecs_to_jiffies(200))) + pr_warn("Timeout waiting for gmux switch to complete\n"); + + return 0; +} + +static int gmux_set_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + if (id == VGA_SWITCHEROO_IGD) + return 0; + + return gmux_set_discrete_state(apple_gmux_data, state); +} + +static int gmux_get_client_id(struct pci_dev *pdev) +{ + /* + * Early Macbook Pros with switchable graphics use nvidia + * integrated graphics. 
Hardcode that the 9400M is integrated. + */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + return VGA_SWITCHEROO_IGD; + else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && + pdev->device == 0x0863) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static enum vga_switcheroo_client_id +gmux_active_client(struct apple_gmux_data *gmux_data) +{ + if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) + return VGA_SWITCHEROO_IGD; + + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler gmux_handler = { + .switchto = gmux_switchto, + .power_state = gmux_set_power_state, + .get_client_id = gmux_get_client_id, +}; + +static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) +{ + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, + GMUX_INTERRUPT_DISABLE); +} + +static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data) +{ + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, + GMUX_INTERRUPT_ENABLE); +} + +static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data) +{ + return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS); +} + +static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data) +{ + u8 status; + + /* to clear interrupts write back current status */ + status = gmux_interrupt_get_status(gmux_data); + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status); +} + +static void gmux_notify_handler(acpi_handle device, u32 value, void *context) +{ + u8 status; + struct pnp_dev *pnp = (struct pnp_dev *)context; + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + + status = gmux_interrupt_get_status(gmux_data); + gmux_disable_interrupts(gmux_data); + pr_debug("Notify handler called: status %d\n", status); + + gmux_clear_interrupts(gmux_data); + gmux_enable_interrupts(gmux_data); + + if (status & GMUX_INTERRUPT_STATUS_POWER) + complete(&gmux_data->powerchange_done); +} + +static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state) +{ + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + gmux_data->resume_client_id = gmux_active_client(gmux_data); + gmux_disable_interrupts(gmux_data); + return 0; +} + +static int gmux_resume(struct pnp_dev *pnp) +{ + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + gmux_enable_interrupts(gmux_data); + gmux_switchto(gmux_data->resume_client_id); + if (gmux_data->power_state == VGA_SWITCHEROO_OFF) + gmux_set_discrete_state(gmux_data, gmux_data->power_state); + return 0; +} + static int __devinit gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { @@ -119,6 +418,11 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, struct backlight_device *bdev; u8 ver_major, ver_minor, ver_release; int ret = -ENXIO; + acpi_status status; + unsigned long long gpe; + + if (apple_gmux_data) + return -EBUSY; gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); if (!gmux_data) @@ -147,22 +451,29 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, } /* - * On some machines the gmux is in ACPI even thought the machine - * doesn't really have a gmux. Check for invalid version information - * to detect this. 
+ * Invalid version information may indicate either that the gmux + * device isn't present or that it's a new one that uses indexed + * io */ + ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { - pr_info("gmux device not present\n"); - ret = -ENODEV; - goto err_release; + if (gmux_is_indexed(gmux_data)) { + mutex_init(&gmux_data->index_lock); + gmux_data->indexed = true; + } else { + pr_info("gmux device not present\n"); + ret = -ENODEV; + goto err_release; + } + pr_info("Found indexed gmux\n"); + } else { + pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, + ver_release); } - pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, - ver_release); - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); @@ -194,13 +505,67 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, * Disable the other backlight choices. */ acpi_video_dmi_promote_vendor(); -#ifdef CONFIG_ACPI_VIDEO +#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE) acpi_video_unregister(); #endif apple_bl_unregister(); + gmux_data->power_state = VGA_SWITCHEROO_ON; + + gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); + if (!gmux_data->dhandle) { + pr_err("Cannot find acpi handle for pnp device %s\n", + dev_name(&pnp->dev)); + ret = -ENODEV; + goto err_notify; + } + + status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe); + if (ACPI_SUCCESS(status)) { + gmux_data->gpe = (int)gpe; + + status = acpi_install_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler, pnp); + if (ACPI_FAILURE(status)) { + pr_err("Install notify handler failed: %s\n", + acpi_format_exception(status)); + ret = -ENODEV; + goto err_notify; + } + + status = acpi_enable_gpe(NULL, gmux_data->gpe); + if (ACPI_FAILURE(status)) { + pr_err("Cannot enable gpe: %s\n", + acpi_format_exception(status)); + goto err_enable_gpe; + } + } else { + pr_warn("No GPE found for gmux\n"); + gmux_data->gpe = -1; + } + + if (vga_switcheroo_register_handler(&gmux_handler)) { + ret = -ENODEV; + goto err_register_handler; + } + + init_completion(&gmux_data->powerchange_done); + apple_gmux_data = gmux_data; + gmux_enable_interrupts(gmux_data); + return 0; +err_register_handler: + if (gmux_data->gpe >= 0) + acpi_disable_gpe(NULL, gmux_data->gpe); +err_enable_gpe: + if (gmux_data->gpe >= 0) + acpi_remove_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler); +err_notify: + backlight_device_unregister(bdev); err_release: release_region(gmux_data->iostart, gmux_data->iolen); err_free: @@ -212,12 +577,23 @@ static void __devexit gmux_remove(struct pnp_dev *pnp) { struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + vga_switcheroo_unregister_handler(); + gmux_disable_interrupts(gmux_data); + if (gmux_data->gpe >= 0) { + acpi_disable_gpe(NULL, gmux_data->gpe); + acpi_remove_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler); + } + backlight_device_unregister(gmux_data->bdev); + release_region(gmux_data->iostart, gmux_data->iolen); + apple_gmux_data = NULL; kfree(gmux_data); acpi_video_dmi_demote_vendor(); -#ifdef CONFIG_ACPI_VIDEO +#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE) acpi_video_register(); #endif apple_bl_register(); @@ -233,6 
+609,8 @@ static struct pnp_driver gmux_pnp_driver = { .probe = gmux_probe, .remove = __devexit_p(gmux_remove), .id_table = gmux_device_ids, + .suspend = gmux_suspend, + .resume = gmux_resume }; static int __init apple_gmux_init(void) diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index c7a36f6b0580..2eb9fe8e8efd 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -101,6 +101,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 #define ASUS_WMI_DEVID_CWAP 0x00010003 #define ASUS_WMI_DEVID_WLAN 0x00010011 +#define ASUS_WMI_DEVID_WLAN_LED 0x00010012 #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 #define ASUS_WMI_DEVID_GPS 0x00010015 #define ASUS_WMI_DEVID_WIMAX 0x00010017 @@ -731,8 +732,21 @@ static int asus_rfkill_set(void *data, bool blocked) { struct asus_rfkill *priv = data; u32 ctrl_param = !blocked; + u32 dev_id = priv->dev_id; - return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); + /* + * If the user bit is set, BIOS can't set and record the wlan status, + * it will report the value read from id ASUS_WMI_DEVID_WLAN_LED + * while we query the wlan status through WMI(ASUS_WMI_DEVID_WLAN). + * So, we have to record wlan status in id ASUS_WMI_DEVID_WLAN_LED + * while setting the wlan status through WMI. + * This is also the behavior that windows app will do. + */ + if ((dev_id == ASUS_WMI_DEVID_WLAN) && + priv->asus->driver->wlan_ctrl_by_user) + dev_id = ASUS_WMI_DEVID_WLAN_LED; + + return asus_wmi_set_devstate(dev_id, ctrl_param, NULL); } static void asus_rfkill_query(struct rfkill *rfkill, void *data) @@ -1653,6 +1667,7 @@ static int asus_wmi_add(struct platform_device *pdev) struct asus_wmi *asus; acpi_status status; int err; + u32 result; asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL); if (!asus) @@ -1711,6 +1726,10 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_debugfs; + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); + if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + asus->driver->wlan_ctrl_by_user = 1; + return 0; fail_debugfs: diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index 9c1da8b81bea..4c9bd38bb0a2 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -46,6 +46,7 @@ struct quirk_entry { struct asus_wmi_driver { int brightness; int panel_power; + int wlan_ctrl_by_user; const char *name; struct module *owner; diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 2ca7dd1ab3e4..c87ff16873f9 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev) inputdev->close = cmpc_accel_close_v4; } +#ifdef CONFIG_PM_SLEEP static int cmpc_accel_suspend_v4(struct device *dev) { struct input_dev *inputdev; @@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev) return 0; } +#endif static int cmpc_accel_add_v4(struct acpi_device *acpi) { @@ -723,8 +725,10 @@ static void cmpc_tablet_handler(struct acpi_device *dev, u32 event) struct input_dev *inputdev = dev_get_drvdata(&dev->dev); if (event == 0x81) { - if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } } } @@ -737,8 +741,10 @@ static void cmpc_tablet_idev_init(struct input_dev *inputdev) 
set_bit(SW_TABLET_MODE, inputdev->swbit); acpi = to_acpi_device(inputdev->dev.parent); - if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } } static int cmpc_tablet_add(struct acpi_device *acpi) @@ -752,15 +758,19 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type) return cmpc_remove_acpi_notify_device(acpi); } +#ifdef CONFIG_PM_SLEEP static int cmpc_tablet_resume(struct device *dev) { struct input_dev *inputdev = dev_get_drvdata(dev); unsigned long long val = 0; - if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } return 0; } +#endif static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 4e96e8c0b60f..927c33af67ec 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -211,7 +211,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5420"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -220,7 +220,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -229,7 +229,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5720", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5720"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7420"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -247,7 +247,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -256,7 +256,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7720", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7720"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"), }, .driver_data = &quirk_dell_vostro_v130, }, diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c index d2e41735a47b..7acae3f85f3b 100644 --- a/drivers/platform/x86/fujitsu-tablet.c +++ b/drivers/platform/x86/fujitsu-tablet.c @@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fujitsu_resume(struct device *dev) { fujitsu_reset(); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume); diff --git a/drivers/platform/x86/hdaps.c 
b/drivers/platform/x86/hdaps.c index d9ab6f64dcec..777c7e3dda51 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP static int hdaps_resume(struct device *dev) { return hdaps_device_init(); } +#endif static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume); diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index f4d91154ad67..6b9af989632b 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int lis3lv02d_suspend(struct device *dev) { /* make sure the device is off when we suspend */ diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17f6dfd8dbfb..dae7abe1d711 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -36,6 +36,7 @@ #include <linux/fb.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/i8042.h> #define IDEAPAD_RFKILL_DEV_NUM (3) @@ -63,8 +64,11 @@ enum { VPCCMD_R_3G, VPCCMD_W_3G, VPCCMD_R_ODD, /* 0x21 */ - VPCCMD_R_RF = 0x23, + VPCCMD_W_FAN, + VPCCMD_R_RF, VPCCMD_W_RF, + VPCCMD_R_FAN = 0x2B, + VPCCMD_R_SPECIAL_BUTTONS = 0x31, VPCCMD_W_BL_POWER = 0x33, }; @@ -356,14 +360,46 @@ static ssize_t store_ideapad_cam(struct device *dev, return -EINVAL; ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state); if (ret < 0) - return ret; + return -EIO; return count; } static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); +static ssize_t show_ideapad_fan(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long result; + + if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result)) + return sprintf(buf, "-1\n"); + return sprintf(buf, "%lu\n", result); +} + +static ssize_t store_ideapad_fan(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret, state; + + if (!count) + return 0; + if (sscanf(buf, "%i", &state) != 1) + return -EINVAL; + if (state < 0 || state > 4 || state == 3) + return -EINVAL; + ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state); + if (ret < 0) + return -EIO; + return count; +} + +static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan); + static struct attribute *ideapad_attributes[] = { &dev_attr_camera_power.attr, + &dev_attr_fan_mode.attr, NULL }; @@ -377,7 +413,10 @@ static umode_t ideapad_is_visible(struct kobject *kobj, if (attr == &dev_attr_camera_power.attr) supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); - else + else if (attr == &dev_attr_fan_mode.attr) { + unsigned long value; + supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value); + } else supported = true; return supported ? 
attr->mode : 0; @@ -518,9 +557,15 @@ static void ideapad_platform_exit(struct ideapad_private *priv) */ static const struct key_entry ideapad_keymap[] = { { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, + { KE_KEY, 7, { KEY_CAMERA } }, + { KE_KEY, 11, { KEY_F16 } }, { KE_KEY, 13, { KEY_WLAN } }, { KE_KEY, 16, { KEY_PROG1 } }, { KE_KEY, 17, { KEY_PROG2 } }, + { KE_KEY, 64, { KEY_PROG3 } }, + { KE_KEY, 65, { KEY_PROG4 } }, + { KE_KEY, 66, { KEY_TOUCHPAD_OFF } }, + { KE_KEY, 67, { KEY_TOUCHPAD_ON } }, { KE_END, 0 }, }; @@ -587,6 +632,28 @@ static void ideapad_input_novokey(struct ideapad_private *priv) ideapad_input_report(priv, 16); } +static void ideapad_check_special_buttons(struct ideapad_private *priv) +{ + unsigned long bit, value; + + read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value); + + for (bit = 0; bit < 16; bit++) { + if (test_bit(bit, &value)) { + switch (bit) { + case 6: + /* Thermal Management button */ + ideapad_input_report(priv, 65); + break; + case 1: + /* OneKey Theater button */ + ideapad_input_report(priv, 64); + break; + } + } + } +} + /* * backlight */ @@ -691,6 +758,24 @@ static const struct acpi_device_id ideapad_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); +static void ideapad_sync_touchpad_state(struct acpi_device *adevice) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + unsigned long value; + + /* Without reading from EC touchpad LED doesn't switch state */ + if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) { + /* Some IdeaPads don't really turn off touchpad - they only + * switch the LED state. We (de)activate KBC AUX port to turn + * touchpad off and on. We send KEY_TOUCHPAD_OFF and + * KEY_TOUCHPAD_ON to not to get out of sync with LED */ + unsigned char param; + i8042_command(¶m, value ? I8042_CMD_AUX_ENABLE : + I8042_CMD_AUX_DISABLE); + ideapad_input_report(priv, value ? 
67 : 66); + } +} + static int __devinit ideapad_acpi_add(struct acpi_device *adevice) { int ret, i; @@ -727,6 +812,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) priv->rfk[i] = NULL; } ideapad_sync_rfk_state(priv); + ideapad_sync_touchpad_state(adevice); if (!acpi_video_backlight_support()) { ret = ideapad_backlight_init(priv); @@ -785,9 +871,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) ideapad_sync_rfk_state(priv); break; case 13: + case 11: + case 7: case 6: ideapad_input_report(priv, vpc_bit); break; + case 5: + ideapad_sync_touchpad_state(adevice); + break; case 4: ideapad_backlight_notify_brightness(priv); break; @@ -797,6 +888,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) case 2: ideapad_backlight_notify_power(priv); break; + case 0: + ideapad_check_special_buttons(priv); + break; default: pr_info("Unknown event: %lu\n", vpc_bit); } @@ -804,6 +898,15 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) } } +static int ideapad_acpi_resume(struct device *device) +{ + ideapad_sync_rfk_state(ideapad_priv); + ideapad_sync_touchpad_state(to_acpi_device(device)); + return 0; +} + +static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); + static struct acpi_driver ideapad_acpi_driver = { .name = "ideapad_acpi", .class = "IdeaPad", @@ -811,6 +914,7 @@ static struct acpi_driver ideapad_acpi_driver = { .ops.add = ideapad_acpi_add, .ops.remove = ideapad_acpi_remove, .ops.notify = ideapad_acpi_notify, + .drv.pm = &ideapad_pm, .owner = THIS_MODULE, }; diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index f64441844317..2111dbb7e1e3 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -85,7 +85,9 @@ #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device); +#endif static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume); #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f @@ -753,6 +755,7 @@ err_bluetooth: return retval; } +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device) { u8 data; @@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device) return 0; } +#endif static int __init msi_laptop_input_setup(void) { diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 24480074bcf0..8e8caa767d6a 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, pcc_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume); static struct acpi_driver acpi_pcc_driver = { @@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) /* kernel module interface */ +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev) { struct pcc_acpi *pcc; @@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev) return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); } +#endif static int acpi_pcc_hotkey_add(struct acpi_device *device) { diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 9363969ad07a..daaddec68def 100644 --- a/drivers/platform/x86/sony-laptop.c +++ 
b/drivers/platform/x86/sony-laptop.c @@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " "(default: 0)"); +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void); +static void sony_nc_thermal_resume(void); +#endif static int sony_nc_kbd_backlight_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); @@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd); static int sony_nc_thermal_setup(struct platform_device *pd); static void sony_nc_thermal_cleanup(struct platform_device *pd); -static void sony_nc_thermal_resume(void); static int sony_nc_lid_resume_setup(struct platform_device *pd); static void sony_nc_lid_resume_cleanup(struct platform_device *pd); @@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) sony_nc_handles_cleanup(pd); } +#ifdef CONFIG_PM_SLEEP static void sony_nc_function_resume(void) { unsigned int i, result, bitmask, arg; @@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); @@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void) { int ignore = 0; @@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void) (kbdbl_ctl->base + 0x200) | (kbdbl_ctl->timeout << 0x10), &ignore); } +#endif struct battery_care_control { struct device_attribute attrs[2]; @@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_thermal_resume(void) { unsigned int status = sony_nc_thermal_mode_get(); @@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void) if (status != th_handle->mode) sony_nc_thermal_mode_set(th_handle->mode); } +#endif /* resume on LID open */ struct snc_lid_resume_control { @@ -4287,6 +4295,7 @@ err_free_resources: return result; } +#ifdef CONFIG_PM_SLEEP static int sony_pic_suspend(struct device *dev) { if (sony_pic_disable(to_acpi_device(dev))) @@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev) spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e7f73287636c..80e377949314 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev; static struct mutex tpacpi_inputdev_send_mutex; static LIST_HEAD(tpacpi_all_drivers); +#ifdef CONFIG_PM_SLEEP static int tpacpi_suspend_handler(struct device *dev) { struct ibm_struct *ibm, *itmp; @@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(tpacpi_pm, tpacpi_suspend_handler, tpacpi_resume_handler); @@ -8662,6 +8664,13 @@ static int __must_check __init get_thinkpad_model_data( tp->model_str = kstrdup(s, GFP_KERNEL); if (!tp->model_str) return -ENOMEM; + } else { + s = dmi_get_system_info(DMI_BIOS_VENDOR); + if (s && !(strnicmp(s, "Lenovo", 6))) { + tp->model_str = kstrdup(s, GFP_KERNEL); + if (!tp->model_str) + return -ENOMEM; + } } s = dmi_get_system_info(DMI_PRODUCT_NAME); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 
c13ba5bac93f..5f1256d5e933 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int toshiba_acpi_suspend(struct device *device) { struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); @@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device) return 0; } +#endif static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm, toshiba_acpi_suspend, toshiba_acpi_resume); diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c index 715a43cb5e3c..5e5d6317d690 100644 --- a/drivers/platform/x86/toshiba_bluetooth.c +++ b/drivers/platform/x86/toshiba_bluetooth.c @@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, bt_device_ids); +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); static struct acpi_driver toshiba_bt_rfkill_driver = { @@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) toshiba_bluetooth_enable(device->handle); } +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev) { return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); } +#endif static int toshiba_bt_rfkill_add(struct acpi_device *device) { diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c index b57ad8641480..1da13ed34b04 100644 --- a/drivers/platform/x86/xo1-rfkill.c +++ b/drivers/platform/x86/xo1-rfkill.c @@ -12,8 +12,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rfkill.h> - -#include <asm/olpc.h> +#include <linux/olpc-ec.h> static bool card_blocked; diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c index 849c07c13bf6..38ba39d7ca7d 100644 --- a/drivers/platform/x86/xo15-ebook.c +++ b/drivers/platform/x86/xo15-ebook.c @@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int ebook_switch_resume(struct device *dev) { return ebook_send_state(to_acpi_device(dev)); } +#endif static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume); diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 55b10b813353..a89a41acf9c5 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -17,6 +17,7 @@ #include <linux/power_supply.h> #include <linux/jiffies.h> #include <linux/sched.h> +#include <linux/olpc-ec.h> #include <asm/olpc.h> diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 722246cf20ab..5d44252b7342 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work) " info %4.4x\n", DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } + + wr_ptr = ioread32(priv->regs + + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; } iowrite32(rd_ptr & (IDB_QSIZE - 1), @@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work) regval |= TSI721_SR_CHINT_IDBQRCV; iowrite32(regval, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + if (wr_ptr != rd_ptr) + schedule_work(&priv->idb_work); } /** @@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct 
pci_device_id *id) { struct tsi721_device *priv; - int i, cap; + int cap; int err; u32 regval; @@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, priv->pdev = pdev; #ifdef DEBUG + { + int i; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", i, (unsigned long long)pci_resource_start(pdev, i), (unsigned long)pci_resource_len(pdev, i), pci_resource_flags(pdev, i)); } + } #endif /* * Verify BAR configuration diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 182b553059c9..c151fd5d8c97 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -486,6 +486,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .id = AB3100_BUCK, .ops = ®ulator_ops_variable_sleepable, .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), + .volt_table = ldo_e_buck_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_time = 1000, diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index e9c2085f9dfb..ce0fe72a428e 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -64,14 +64,15 @@ static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector) static int anatop_get_voltage_sel(struct regulator_dev *reg) { struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); - u32 val; + u32 val, mask; if (!anatop_reg->control_reg) return -ENOTSUPP; val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg); - val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >> + mask = ((1 << anatop_reg->vol_bit_width) - 1) << anatop_reg->vol_bit_shift; + val = (val & mask) >> anatop_reg->vol_bit_shift; return val - anatop_reg->min_bit_val; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index f092588a078c..48385318175a 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3217,7 +3217,7 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); - if (config->ena_gpio) { + if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { ret = gpio_request_one(config->ena_gpio, GPIOF_DIR_OUT | config->ena_gpio_flags, rdev_get_name(rdev)); diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 34b67bee9323..8b5944f2d7d1 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -57,16 +57,17 @@ static int gpio_regulator_get_value(struct regulator_dev *dev) return -EINVAL; } -static int gpio_regulator_set_value(struct regulator_dev *dev, - int min, int max, unsigned *selector) +static int gpio_regulator_set_voltage(struct regulator_dev *dev, + int min_uV, int max_uV, + unsigned *selector) { struct gpio_regulator_data *data = rdev_get_drvdata(dev); int ptr, target = 0, state, best_val = INT_MAX; for (ptr = 0; ptr < data->nr_states; ptr++) if (data->states[ptr].value < best_val && - data->states[ptr].value >= min && - data->states[ptr].value <= max) { + data->states[ptr].value >= min_uV && + data->states[ptr].value <= max_uV) { target = data->states[ptr].gpios; best_val = data->states[ptr].value; if (selector) @@ -85,13 +86,6 @@ static int gpio_regulator_set_value(struct regulator_dev *dev, return 0; } -static int gpio_regulator_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV, - unsigned *selector) -{ - return gpio_regulator_set_value(dev, min_uV, max_uV, selector); -} - static int gpio_regulator_list_voltage(struct regulator_dev *dev, unsigned 
selector) { @@ -106,7 +100,27 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, static int gpio_regulator_set_current_limit(struct regulator_dev *dev, int min_uA, int max_uA) { - return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + int ptr, target = 0, state, best_val = 0; + + for (ptr = 0; ptr < data->nr_states; ptr++) + if (data->states[ptr].value > best_val && + data->states[ptr].value >= min_uA && + data->states[ptr].value <= max_uA) { + target = data->states[ptr].gpios; + best_val = data->states[ptr].value; + } + + if (best_val == 0) + return -EINVAL; + + for (ptr = 0; ptr < data->nr_gpios; ptr++) { + state = (target & (1 << ptr)) >> ptr; + gpio_set_value(data->gpios[ptr].gpio, state); + } + data->state = target; + + return 0; } static struct regulator_ops gpio_regulator_voltage_ops = { diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 17d19fbbc490..46c7e88f8381 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -486,9 +486,12 @@ static int palmas_map_voltage_ldo(struct regulator_dev *rdev, { int ret, voltage; - ret = ((min_uV - 900000) / 50000) + 1; - if (ret < 0) - return ret; + if (min_uV == 0) + return 0; + + if (min_uV < 900000) + min_uV = 900000; + ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1; /* Map back into a voltage to verify we're still in bounds */ voltage = palmas_list_voltage_ldo(rdev, ret); @@ -586,7 +589,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id, addr = palmas_regs_info[id].ctrl_addr; - ret = palmas_smps_read(palmas, addr, &reg); + ret = palmas_ldo_read(palmas, addr, &reg); if (ret) return ret; @@ -596,7 +599,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id, if (reg_init->mode_sleep) reg |= PALMAS_LDO1_CTRL_MODE_SLEEP; - ret = palmas_smps_write(palmas, addr, reg); + ret = palmas_ldo_write(palmas, addr, reg); if (ret) return ret; @@ -630,7 +633,7 @@ static __devinit int palmas_probe(struct platform_device *pdev) ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg); if (ret) - goto err_unregister_regulator; + return ret; if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN) pmic->smps123 = 1; @@ -676,7 +679,9 @@ static __devinit int palmas_probe(struct platform_device *pdev) case PALMAS_REG_SMPS10: pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; pmic->desc[id].ops = &palmas_ops_smps10; - pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL; + pmic->desc[id].vsel_reg = + PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, + PALMAS_SMPS10_CTRL); pmic->desc[id].vsel_mask = SMPS10_VSEL; pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, @@ -778,8 +783,10 @@ static __devinit int palmas_probe(struct platform_device *pdev) reg_init = pdata->reg_init[id]; if (reg_init) { ret = palmas_ldo_init(palmas, id, reg_init); - if (ret) + if (ret) { + regulator_unregister(pmic->rdev[id]); goto err_unregister_regulator; + } } } } diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index e6da90ab5153..19241fc30050 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -240,14 +240,16 @@ static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), - TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), + 
TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), - TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), - TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), + TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1, + ENB, 1, VCC1, 2), + TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0, + ENB, 0, VCC1, 0), }; /* diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 242fe90dc565..77a71a5c17c3 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -1037,7 +1037,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); -TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08); +TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); @@ -1048,7 +1048,6 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); -TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0); TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34); TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10); TWL6025_ADJUSTABLE_SMPS(VIO, 0x16); @@ -1117,7 +1116,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = { TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6), TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN), TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB), - TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2), + TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1), TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index eb415bd76494..9592b936b71b 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events) { + pm_stay_awake(rtc->dev.parent); schedule_work(&rtc->irqwork); } EXPORT_SYMBOL_GPL(rtc_update_irq); @@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work) mutex_lock(&rtc->ops_lock); again: + pm_relax(rtc->dev.parent); __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 132333d75408..4267789ca995 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); - pm_wakeup_event(cmos_rtc.dev, 0); } spin_unlock(&rtc_lock); diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index 836118795c0b..13e4df63974f 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -43,6 +43,7 @@ #include <linux/rtc.h> #include <linux/spi/spi.h> #include <linux/module.h> +#include <linux/sysfs.h> #define DRV_VERSION "0.6" @@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi) pdata->rtc = rtc; for (i = 0; i < 16; i++) { + 
sysfs_attr_init(&pdata->regs[i].attr.attr); sprintf(pdata->regs[i].name, "%1x", i); pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR; pdata->regs[i].attr.attr.name = pdata->regs[i].name; diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c index 77074ccd2850..fd5c7af04ae5 100644 --- a/drivers/rtc/rtc-rs5c348.c +++ b/drivers/rtc/rtc-rs5c348.c @@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK); tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK); if (!pdata->rtc_24h) { - tm->tm_hour %= 12; - if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) + if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) { + tm->tm_hour -= 20; + tm->tm_hour %= 12; tm->tm_hour += 12; + } else + tm->tm_hour %= 12; } tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK); tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 6a6f76bf6e3d..b1032931a1c4 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) switch (sdias_evbuf.event_status) { case EVSTATE_ALL_STORED: TRACE("all stored\n"); + break; case EVSTATE_PART_STORED: TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); break; case EVSTATE_NO_DATA: TRACE("no data\n"); + /* fall through */ default: pr_err("Error from SCLP while copying hsa. " "Event status = %x\n", diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2d1e68db9b3f..e894ca7b54c0 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -4146,45 +4146,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) static void fc_bsg_remove(struct request_queue *q) { - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - if (q) { - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. 
- */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); blk_cleanup_queue(q); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 09809d06eccb..fa1dfaa83e32 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct iscsi_cls_host *ihost = shost->shost_data; if (ihost->bsg_q) { - bsg_remove_queue(ihost->bsg_q); + bsg_unregister_queue(ihost->bsg_q); blk_cleanup_queue(ihost->bsg_q); } return 0; diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig index c88cbccc62b0..a305731742a9 100644 --- a/drivers/sh/intc/Kconfig +++ b/drivers/sh/intc/Kconfig @@ -1,3 +1,7 @@ +config SH_INTC + def_bool y + select IRQ_DOMAIN + comment "Interrupt controller options" config INTC_USERIMASK diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile index 44f006d09471..54ec2a0643df 100644 --- a/drivers/sh/intc/Makefile +++ b/drivers/sh/intc/Makefile @@ -1,4 +1,4 @@ -obj-y := access.o chip.o core.o handle.o virq.o +obj-y := access.o chip.o core.o handle.o irqdomain.o virq.o obj-$(CONFIG_INTC_BALANCING) += balancing.o obj-$(CONFIG_INTC_USERIMASK) += userimask.o diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 7e562ccb6997..32c26d795ed0 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -25,6 +25,7 @@ #include <linux/stat.h> #include <linux/interrupt.h> #include <linux/sh_intc.h> +#include <linux/irqdomain.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/list.h> @@ -310,6 +311,8 @@ int __init register_intc_controller(struct intc_desc *desc) BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ + intc_irq_domain_init(d, hw); + /* register the vectors one by one */ for (i = 0; i < hw->nr_vectors; i++) { struct intc_vect *vect = hw->vectors + i; @@ -319,10 +322,18 @@ int __init register_intc_controller(struct intc_desc *desc) if (!vect->enum_id) continue; - res = irq_alloc_desc_at(irq, numa_node_id()); - if (res != irq && res != -EEXIST) { - pr_err("can't get irq_desc for %d\n", irq); - continue; + res = irq_create_identity_mapping(d->domain, irq); + if (unlikely(res)) { + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, irq, irq); + if (unlikely(res)) { + pr_err("domain association failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", irq); + continue; + } } intc_irq_xlate_set(irq, vect->enum_id, d); @@ -340,10 +351,21 @@ int __init register_intc_controller(struct intc_desc *desc) * IRQ support, each vector still needs to have * its own backing irq_desc. 
*/ - res = irq_alloc_desc_at(irq2, numa_node_id()); - if (res != irq2 && res != -EEXIST) { - pr_err("can't get irq_desc for %d\n", irq2); - continue; + res = irq_create_identity_mapping(d->domain, irq2); + if (unlikely(res)) { + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, + irq, irq); + if (unlikely(res)) { + pr_err("domain association " + "failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", + irq); + continue; + } } vect2->enum_id = 0; diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h index f034a979a16f..7dff08e2a071 100644 --- a/drivers/sh/intc/internals.h +++ b/drivers/sh/intc/internals.h @@ -1,5 +1,6 @@ #include <linux/sh_intc.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/list.h> #include <linux/kernel.h> #include <linux/types.h> @@ -66,6 +67,7 @@ struct intc_desc_int { unsigned int nr_sense; struct intc_window *window; unsigned int nr_windows; + struct irq_domain *domain; struct irq_chip chip; bool skip_suspend; }; @@ -187,6 +189,9 @@ unsigned long intc_get_ack_handle(unsigned int irq); void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, int enable); +/* irqdomain.c */ +void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw); + /* virq.c */ void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d); void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d); diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c new file mode 100644 index 000000000000..3968f1c3c5c3 --- /dev/null +++ b/drivers/sh/intc/irqdomain.c @@ -0,0 +1,68 @@ +/* + * IRQ domain support for SH INTC subsystem + * + * Copyright (C) 2012 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "intc: " fmt + +#include <linux/irqdomain.h> +#include <linux/sh_intc.h> +#include <linux/export.h> +#include "internals.h" + +/** + * intc_irq_domain_evt_xlate() - Generic xlate for vectored IRQs. + * + * This takes care of exception vector to hwirq translation through + * by way of evt2irq() translation. + * + * Note: For platforms that use a flat vector space without INTEVT this + * basically just mimics irq_domain_xlate_onecell() by way of a nopped + * out evt2irq() implementation. + */ +static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (WARN_ON(intsize < 1)) + return -EINVAL; + + *out_hwirq = evt2irq(intspec[0]); + *out_type = IRQ_TYPE_NONE; + + return 0; +} + +static const struct irq_domain_ops intc_evt_ops = { + .xlate = intc_evt_xlate, +}; + +void __init intc_irq_domain_init(struct intc_desc_int *d, + struct intc_hw_desc *hw) +{ + unsigned int irq_base, irq_end; + + /* + * Quick linear revmap check + */ + irq_base = evt2irq(hw->vectors[0].vect); + irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect); + + /* + * Linear domains have a hard-wired assertion that IRQs start at + * 0 in order to make some performance optimizations. Lamely + * restrict the linear case to these conditions here, taking the + * tree penalty for linear cases with non-zero hwirq bases. 
+ */ + if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1)) + d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, + &intc_evt_ops, NULL); + else + d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); + + BUG_ON(!d->domain); +} diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c index 0802b6c0d653..2804eaae804e 100644 --- a/drivers/sh/pfc/pinctrl.c +++ b/drivers/sh/pfc/pinctrl.c @@ -276,7 +276,6 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long config) { struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev); - struct sh_pfc *pfc = pmx->pfc; /* Validate the new type */ if (config >= PINMUX_FLAG_TYPE) @@ -326,20 +325,6 @@ static struct pinctrl_desc sh_pfc_pinctrl_desc = { .confops = &sh_pfc_pinconf_ops, }; -int sh_pfc_register_pinctrl(struct sh_pfc *pfc) -{ - sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL); - if (unlikely(!sh_pfc_pmx)) - return -ENOMEM; - - spin_lock_init(&sh_pfc_pmx->lock); - - sh_pfc_pmx->pfc = pfc; - - return 0; -} -EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl); - static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx, struct pinmux_gpio *gpio, @@ -481,7 +466,6 @@ static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev) { struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev); - pinctrl_remove_gpio_range(pmx->pctl, &sh_pfc_gpio_range); pinctrl_unregister(pmx->pctl); platform_set_drvdata(pdev, NULL); @@ -507,7 +491,7 @@ static struct platform_device sh_pfc_pinctrl_device = { .id = -1, }; -static int __init sh_pfc_pinctrl_init(void) +static int sh_pfc_pinctrl_init(void) { int rc; @@ -521,10 +505,22 @@ static int __init sh_pfc_pinctrl_init(void) return rc; } +int sh_pfc_register_pinctrl(struct sh_pfc *pfc) +{ + sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL); + if (unlikely(!sh_pfc_pmx)) + return -ENOMEM; + + spin_lock_init(&sh_pfc_pmx->lock); + + sh_pfc_pmx->pfc = pfc; + + return sh_pfc_pinctrl_init(); +} +EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl); + static void __exit sh_pfc_pinctrl_exit(void) { platform_driver_unregister(&sh_pfc_pinctrl_driver); } - -subsys_initcall(sh_pfc_pinctrl_init); module_exit(sh_pfc_pinctrl_exit); diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 6e25ef1bce91..ea0aaa3f13d0 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -438,7 +438,7 @@ out: static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct bcm63xx_spi *bs = spi_master_get_devdata(master); spi_unregister_master(master); @@ -452,6 +452,8 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) platform_set_drvdata(pdev, 0); + spi_master_put(master); + return 0; } diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index b2d4b9e4e010..764bfee75920 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c @@ -533,7 +533,6 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev) iounmap(mcfqspi->iobase); release_mem_region(res->start, resource_size(res)); spi_unregister_master(master); - spi_master_put(master); return 0; } @@ -541,7 +540,7 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int mcfqspi_suspend(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + 
struct spi_master *master = dev_get_drvdata(dev); struct mcfqspi *mcfqspi = spi_master_get_devdata(master); spi_master_suspend(master); @@ -553,7 +552,7 @@ static int mcfqspi_suspend(struct device *dev) static int mcfqspi_resume(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct mcfqspi *mcfqspi = spi_master_get_devdata(master); spi_master_resume(master); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 7d46b15e1520..b2fb141da375 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -28,6 +28,8 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/omap-dma.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> @@ -39,7 +41,6 @@ #include <linux/spi/spi.h> -#include <plat/dma.h> #include <plat/clock.h> #include <plat/mcspi.h> @@ -93,8 +94,8 @@ /* We have 2 DMA channels per CS, one for RX and one for TX */ struct omap2_mcspi_dma { - int dma_tx_channel; - int dma_rx_channel; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; int dma_tx_sync_dev; int dma_rx_sync_dev; @@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) return 0; } +static void omap2_mcspi_rx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_rx_completion); + + /* We must disable the DMA RX request */ + omap2_mcspi_set_dma_req(spi, 1, 0); +} + +static void omap2_mcspi_tx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_tx_completion); + + /* We must disable the DMA TX request */ + omap2_mcspi_set_dma_req(spi, 0, 0); +} + static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; - unsigned int count, c; - unsigned long base, tx_reg, rx_reg; - int word_len, data_type, element_count; + unsigned int count; + int word_len, element_count; int elements = 0; u32 l; u8 * rx; const u8 * tx; void __iomem *chstat_reg; + struct dma_slave_config cfg; + enum dma_slave_buswidth width; + unsigned es; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; @@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + if (cs->word_len <= 8) { + width = DMA_SLAVE_BUSWIDTH_1_BYTE; + es = 1; + } else if (cs->word_len <= 16) { + width = DMA_SLAVE_BUSWIDTH_2_BYTES; + es = 2; + } else { + width = DMA_SLAVE_BUSWIDTH_4_BYTES; + es = 4; + } + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; + cfg.src_addr_width = width; + cfg.dst_addr_width = width; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; + + if (xfer->tx_buf && mcspi_dma->dma_tx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + + dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->tx_dma; + sg_dma_len(&sg) = xfer->len; + + tx = 
dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_tx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? */ + } + } + + if (xfer->rx_buf && mcspi_dma->dma_rx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + size_t len = xfer->len - es; + + dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); + + if (l & OMAP2_MCSPI_CHCONF_TURBO) + len -= es; + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->rx_dma; + sg_dma_len(&sg) = len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_rx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? */ + } + } + count = xfer->len; - c = count; word_len = cs->word_len; - base = cs->phys; - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { - data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; } else if (word_len <= 16) { - data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; } else /* word_len <= 32 */ { - data_type = OMAP_DMA_DATA_TYPE_S32; element_count = count >> 2; } if (tx != NULL) { - omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, - data_type, element_count, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_tx_sync_dev, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - tx_reg, 0, 0); - - omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->tx_dma, 0, 0); - } - - if (rx != NULL) { - elements = element_count - 1; - if (l & OMAP2_MCSPI_CHCONF_TURBO) - elements--; - - omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, elements, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_rx_sync_dev, 1); - - omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - rx_reg, 0, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->rx_dma, 0, 0); - } - - if (tx != NULL) { - omap_start_dma(mcspi_dma->dma_tx_channel); + dma_async_issue_pending(mcspi_dma->dma_tx); omap2_mcspi_set_dma_req(spi, 0, 1); } if (rx != NULL) { - omap_start_dma(mcspi_dma->dma_rx_channel); + dma_async_issue_pending(mcspi_dma->dma_rx); omap2_mcspi_set_dma_req(spi, 1, 1); } @@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) DMA_FROM_DEVICE); omap2_mcspi_set_enable(spi, 0); + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + elements--; if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) & OMAP2_MCSPI_CHSTAT_RXS)) { @@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, return 0; } -static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_rx_completion); - - /* We must disable the DMA RX request */ - omap2_mcspi_set_dma_req(spi, 1, 0); -} - -static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = 
&(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_tx_completion); - - /* We must disable the DMA TX request */ - omap2_mcspi_set_dma_req(spi, 0, 0); -} - static int omap2_mcspi_request_dma(struct spi_device *spi) { struct spi_master *master = spi->master; struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; + dma_cap_mask_t mask; + unsigned sig; mcspi = spi_master_get_devdata(master); mcspi_dma = mcspi->dma_channels + spi->chip_select; - if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", - omap2_mcspi_dma_rx_callback, spi, - &mcspi_dma->dma_rx_channel)) { - dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = mcspi_dma->dma_rx_sync_dev; + mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_rx) { + dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n"); return -EAGAIN; } - if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", - omap2_mcspi_dma_tx_callback, spi, - &mcspi_dma->dma_tx_channel)) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); + sig = mcspi_dma->dma_tx_sync_dev; + mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_tx) { + dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n"); + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; return -EAGAIN; } - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - return 0; } @@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) list_add_tail(&cs->node, &ctx->cs); } - if (mcspi_dma->dma_rx_channel == -1 - || mcspi_dma->dma_tx_channel == -1) { + if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { ret = omap2_mcspi_request_dma(spi); if (ret < 0) return ret; @@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) if (spi->chip_select < spi->master->num_chipselect) { mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; + if (mcspi_dma->dma_rx) { + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + if (mcspi_dma->dma_tx) { + dma_release_channel(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; } } } @@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_rx_channel = -1; mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; sprintf(dma_ch_name, "tx%d", i); dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, @@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_tx_channel = -1; mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; } @@ -1203,18 +1228,16 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) status = spi_register_master(master); if (status < 0) - goto err_spi_register; + goto disable_pm; return status; -err_spi_register: - spi_master_put(master); disable_pm: pm_runtime_disable(&pdev->dev); dma_chnl_free: kfree(mcspi->dma_channels); free_master: - kfree(master); + spi_master_put(master); platform_set_drvdata(pdev, NULL); 
return status; } diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index aab518ec2bbc..6abbe23c39b4 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -2053,7 +2053,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); - pm_runtime_enable(dev); pm_runtime_resume(dev); pl022->clk = clk_get(&adev->dev, NULL); diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 646a7657fe62..d1c8441f638c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -826,7 +826,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs; - struct device_node *slave_np, *data_np; + struct device_node *slave_np, *data_np = NULL; u32 fb_delay = 0; slave_np = spi->dev.of_node; @@ -1479,40 +1479,40 @@ static const struct dev_pm_ops s3c64xx_spi_pm = { s3c64xx_spi_runtime_resume, NULL) }; -struct s3c64xx_spi_port_config s3c2443_spi_port_config = { +static struct s3c64xx_spi_port_config s3c2443_spi_port_config = { .fifo_lvl_mask = { 0x7f }, .rx_lvl_offset = 13, .tx_st_done = 21, .high_speed = true, }; -struct s3c64xx_spi_port_config s3c6410_spi_port_config = { +static struct s3c64xx_spi_port_config s3c6410_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, }; -struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { +static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, }; -struct s3c64xx_spi_port_config s5pc100_spi_port_config = { +static struct s3c64xx_spi_port_config s5pc100_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, .high_speed = true, }; -struct s3c64xx_spi_port_config s5pv210_spi_port_config = { +static struct s3c64xx_spi_port_config s5pv210_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, .high_speed = true, }; -struct s3c64xx_spi_port_config exynos4_spi_port_config = { +static struct s3c64xx_spi_port_config exynos4_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c index 9a60d4cd2184..f545716c666d 100644 --- a/drivers/staging/bcm/Misc.c +++ b/drivers/staging/bcm/Misc.c @@ -157,12 +157,7 @@ static int create_worker_threads(struct bcm_mini_adapter *psAdapter) static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path) { - struct file *flp = NULL; - mm_segment_t oldfs; - oldfs = get_fs(); - set_fs(get_ds()); - flp = filp_open(path, O_RDONLY, S_IRWXU); - set_fs(oldfs); + struct file *flp = filp_open(path, O_RDONLY, S_IRWXU); if (IS_ERR(flp)) { pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp)); flp = NULL; @@ -183,14 +178,12 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u { int errorno = 0; struct file *flp = NULL; - mm_segment_t oldfs; struct timeval tv = {0}; flp = open_firmware_file(Adapter, path); if (!flp) { - errorno = -ENOENT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path); - goto exit_download; + return -ENOENT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc); 
do_gettimeofday(&tv); @@ -201,10 +194,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u errorno = -EIO; goto exit_download; } - oldfs = get_fs(); - set_fs(get_ds()); vfs_llseek(flp, 0, 0); - set_fs(oldfs); if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!"); errorno = -EIO; @@ -212,12 +202,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u } exit_download: - oldfs = get_fs(); - set_fs(get_ds()); - if (flp && !(IS_ERR(flp))) - filp_close(flp, current->files); - set_fs(oldfs); - + filp_close(flp, NULL); return errorno; } @@ -1056,10 +1041,8 @@ OUT: static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) { struct file *flp = NULL; - mm_segment_t oldfs = {0}; char *buff; int len = 0; - loff_t pos = 0; buff = kmalloc(BUFFER_1K, GFP_KERNEL); if (!buff) @@ -1079,20 +1062,16 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) Adapter->pstargetparams = NULL; return -ENOENT; } - oldfs = get_fs(); - set_fs(get_ds()); - len = vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos); - set_fs(oldfs); + len = kernel_read(flp, 0, buff, BUFFER_1K); + filp_close(flp, NULL); if (len != sizeof(STARGETPARAMS)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n"); kfree(buff); kfree(Adapter->pstargetparams); Adapter->pstargetparams = NULL; - filp_close(flp, current->files); return -ENOENT; } - filp_close(flp, current->files); /* Check for autolink in config params */ /* diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index c0fdb00783ed..2359151af7e1 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -168,7 +168,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) dev->board_ptr = comedi_recognize(driv, it->board_name); if (dev->board_ptr) break; - } else if (strcmp(driv->driver_name, it->board_name)) + } else if (strcmp(driv->driver_name, it->board_name) == 0) break; module_put(driv->module); } diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 31986608eaf1..6b4d0d68e637 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -1349,9 +1349,6 @@ static struct pci_dev *pci1710_find_pci_dev(struct comedi_device *dev, } if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH) continue; - if (pci_is_enabled(pcidev)) - continue; - if (strcmp(this_board->name, DRV_NAME) == 0) { for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) { if (pcidev->device == boardtypes[i].device_id) { diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c index da5ee69d2c9d..dfde0f6328dd 100644 --- a/drivers/staging/comedi/drivers/adv_pci1723.c +++ b/drivers/staging/comedi/drivers/adv_pci1723.c @@ -301,8 +301,6 @@ static struct pci_dev *pci1723_find_pci_dev(struct comedi_device *dev, } if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH) continue; - if (pci_is_enabled(pcidev)) - continue; return pcidev; } dev_err(dev->class_dev, diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c index 97f06dc8e48d..2d4cb7f638b2 100644 --- a/drivers/staging/comedi/drivers/adv_pci_dio.c +++ b/drivers/staging/comedi/drivers/adv_pci_dio.c @@ -1064,8 +1064,6 @@ static struct 
pci_dev *pci_dio_find_pci_dev(struct comedi_device *dev, slot != PCI_SLOT(pcidev->devfn)) continue; } - if (pci_is_enabled(pcidev)) - continue; for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) { if (boardtypes[i].vendor_id != pcidev->vendor) continue; diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c index ef28385c1482..cad559a1a730 100644 --- a/drivers/staging/comedi/drivers/daqboard2000.c +++ b/drivers/staging/comedi/drivers/daqboard2000.c @@ -718,7 +718,8 @@ static struct pci_dev *daqboard2000_find_pci_dev(struct comedi_device *dev, continue; } if (pcidev->vendor != PCI_VENDOR_ID_IOTECH || - pcidev->device != 0x0409) + pcidev->device != 0x0409 || + pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH) continue; for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { @@ -739,6 +740,7 @@ static int daqboard2000_attach(struct comedi_device *dev, { struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; void *aux_data; unsigned int aux_len; int result; @@ -758,11 +760,12 @@ static int daqboard2000_attach(struct comedi_device *dev, "failed to enable PCI device and request regions\n"); return -EIO; } - dev->iobase = pci_resource_start(pcidev, 2); + dev->iobase = 1; /* the "detach" needs this */ - devpriv->plx = - ioremap(pci_resource_start(pcidev, 0), DAQBOARD2000_PLX_SIZE); - devpriv->daq = ioremap(dev->iobase, DAQBOARD2000_DAQ_SIZE); + pci_base = pci_resource_start(pcidev, 0); + devpriv->plx = ioremap(pci_base, DAQBOARD2000_PLX_SIZE); + pci_base = pci_resource_start(pcidev, 2); + devpriv->daq = ioremap(pci_base, DAQBOARD2000_DAQ_SIZE); if (!devpriv->plx || !devpriv->daq) return -ENOMEM; @@ -799,8 +802,6 @@ static int daqboard2000_attach(struct comedi_device *dev, printk("Interrupt after is: %x\n", interrupt); */ - dev->iobase = (unsigned long)devpriv->daq; - dev->board_name = this_board->name; s = dev->subdevices + 0; @@ -824,7 +825,7 @@ static int daqboard2000_attach(struct comedi_device *dev, s = dev->subdevices + 2; result = subdev_8255_init(dev, s, daqboard2000_8255_cb, - (unsigned long)(dev->iobase + 0x40)); + (unsigned long)(devpriv->daq + 0x40)); out: return result; diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index a6fe6c9be87e..3476cda0fff0 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -804,6 +804,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; int ret = 0; dev_dbg(dev->class_dev, "dt3000:\n"); @@ -820,9 +821,10 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it) ret = comedi_pci_enable(pcidev, "dt3000"); if (ret < 0) return ret; + dev->iobase = 1; /* the "detach" needs this */ - dev->iobase = pci_resource_start(pcidev, 0); - devpriv->io_addr = ioremap(dev->iobase, DT3000_SIZE); + pci_base = pci_resource_start(pcidev, 0); + devpriv->io_addr = ioremap(pci_base, DT3000_SIZE); if (!devpriv->io_addr) return -ENOMEM; diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c index 112fdc3e9c69..5aa8be1e7b92 100644 --- a/drivers/staging/comedi/drivers/rtd520.c +++ b/drivers/staging/comedi/drivers/rtd520.c @@ -1619,9 +1619,8 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it) struct rtdPrivate *devpriv; struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; int ret; - resource_size_t 
physLas1; /* data area */ - resource_size_t physLcfg; /* PLX9080 */ #ifdef USE_DMA int index; #endif @@ -1655,20 +1654,15 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it) printk(KERN_INFO "Failed to enable PCI device and request regions.\n"); return ret; } - - /* - * Initialize base addresses - */ - /* Get the physical address from PCI config */ - dev->iobase = pci_resource_start(pcidev, LAS0_PCIINDEX); - physLas1 = pci_resource_start(pcidev, LAS1_PCIINDEX); - physLcfg = pci_resource_start(pcidev, LCFG_PCIINDEX); - /* Now have the kernel map this into memory */ - /* ASSUME page aligned */ - devpriv->las0 = ioremap_nocache(dev->iobase, LAS0_PCISIZE); - devpriv->las1 = ioremap_nocache(physLas1, LAS1_PCISIZE); - devpriv->lcfg = ioremap_nocache(physLcfg, LCFG_PCISIZE); - + dev->iobase = 1; /* the "detach" needs this */ + + /* Initialize the base addresses */ + pci_base = pci_resource_start(pcidev, LAS0_PCIINDEX); + devpriv->las0 = ioremap_nocache(pci_base, LAS0_PCISIZE); + pci_base = pci_resource_start(pcidev, LAS1_PCIINDEX); + devpriv->las1 = ioremap_nocache(pci_base, LAS1_PCISIZE); + pci_base = pci_resource_start(pcidev, LCFG_PCIINDEX); + devpriv->lcfg = ioremap_nocache(pci_base, LCFG_PCISIZE); if (!devpriv->las0 || !devpriv->las1 || !devpriv->lcfg) return -ENOMEM; diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index 848c7ec06976..11ee83681da7 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -102,6 +102,7 @@ sampling rate. If you sample two channels you get 4kHz and so on. #define BULK_TIMEOUT 1000 /* constants for "firmware" upload and download */ +#define FIRMWARE "usbdux_firmware.bin" #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -2791,7 +2792,7 @@ static int usbdux_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbdux_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxsub + index, @@ -2850,3 +2851,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index d9911588c10a..8eb41257c6ce 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -57,6 +57,7 @@ /* * constants for "firmware" upload and download */ +#define FIRMWARE "usbduxfast_firmware.bin" #define USBDUXFASTSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -1706,7 +1707,7 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbduxfast_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxfastsub + index, @@ -1774,3 +1775,4 @@ module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index 543e604791e2..f54ab8c2fcfd 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -63,6 +63,7 @@ Status: testing #define BULK_TIMEOUT 1000 /* constants for 
"firmware" upload and download */ +#define FIRMWARE "usbduxsigma_firmware.bin" #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -2780,7 +2781,7 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbduxsigma_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxsub + index, @@ -2845,3 +2846,4 @@ module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/drivers/staging/csr/Kconfig b/drivers/staging/csr/Kconfig index cee8d48d2af9..ad2a1096e920 100644 --- a/drivers/staging/csr/Kconfig +++ b/drivers/staging/csr/Kconfig @@ -1,6 +1,6 @@ config CSR_WIFI tristate "CSR wireless driver" - depends on MMC && CFG80211_WEXT + depends on MMC && CFG80211_WEXT && INET select WIRELESS_EXT select WEXT_PRIV help diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c index 760efee23d4a..65624bca8b3a 100644 --- a/drivers/staging/gdm72xx/sdio_boot.c +++ b/drivers/staging/gdm72xx/sdio_boot.c @@ -66,9 +66,8 @@ static int download_image(struct sdio_func *func, char *img_name) return -ENOENT; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -123,7 +122,7 @@ static int download_image(struct sdio_func *func, char *img_name) pno++; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); return ret; } diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c index fef290c38db6..e3dbd5a552ca 100644 --- a/drivers/staging/gdm72xx/usb_boot.c +++ b/drivers/staging/gdm72xx/usb_boot.c @@ -173,14 +173,12 @@ int usb_boot(struct usb_device *usbdev, u16 pid) filp = filp_open(img_name, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(filp)) { printk(KERN_ERR "Can't find %s.\n", img_name); - set_fs(fs); ret = PTR_ERR(filp); goto restore_fs; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -262,7 +260,7 @@ int usb_boot(struct usb_device *usbdev, u16 pid) ret = -EINVAL; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); @@ -322,13 +320,11 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto restore_fs; } - if (filp->f_dentry) { - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { - printk(KERN_ERR "Invalid file type: %s\n", path); - ret = -EINVAL; - goto out; - } + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { + printk(KERN_ERR "Invalid file type: %s\n", path); + ret = -EINVAL; + goto out; } buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL); @@ -364,7 +360,7 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto out; out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index 22c3923d55eb..095837285f4f 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -754,7 +754,7 @@ static 
ssize_t ad7192_set(struct device *dev, else st->mode &= ~AD7192_MODE_ACX; - ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode); + ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode); break; default: ret = -EINVAL; @@ -798,6 +798,11 @@ static const struct attribute_group ad7195_attribute_group = { .attrs = ad7195_attributes, }; +static unsigned int ad7192_get_temp_scale(bool unipolar) +{ + return unipolar ? 2815 * 2 : 2815; +} + static int ad7192_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, @@ -824,19 +829,6 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, *val = (smpl >> chan->scan_type.shift) & ((1 << (chan->scan_type.realbits)) - 1); - switch (chan->type) { - case IIO_VOLTAGE: - if (!unipolar) - *val -= (1 << (chan->scan_type.realbits - 1)); - break; - case IIO_TEMP: - *val -= 0x800000; - *val /= 2815; /* temp Kelvin */ - *val -= 273; /* temp Celsius */ - break; - default: - return -EINVAL; - } return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -848,11 +840,21 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: - *val = 1000; - return IIO_VAL_INT; + *val = 0; + *val2 = 1000000000 / ad7192_get_temp_scale(unipolar); + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } + case IIO_CHAN_INFO_OFFSET: + if (!unipolar) + *val = -(1 << (chan->scan_type.realbits - 1)); + else + *val = 0; + /* Kelvin to Celsius */ + if (chan->type == IIO_TEMP) + *val -= 273 * ad7192_get_temp_scale(unipolar); + return IIO_VAL_INT; } return -EINVAL; @@ -890,7 +892,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev, } ret = 0; } - + break; default: ret = -EINVAL; } @@ -942,20 +944,22 @@ static const struct iio_info ad7195_info = { .channel = _chan, \ .channel2 = _chan2, \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ - IIO_CHAN_INFO_SCALE_SHARED_BIT, \ + IIO_CHAN_INFO_SCALE_SHARED_BIT | \ + IIO_CHAN_INFO_OFFSET_SHARED_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} #define AD7192_CHAN(_chan, _address, _si) \ { .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _chan, \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ - IIO_CHAN_INFO_SCALE_SHARED_BIT, \ + IIO_CHAN_INFO_SCALE_SHARED_BIT | \ + IIO_CHAN_INFO_OFFSET_SHARED_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} #define AD7192_CHAN_TEMP(_chan, _address, _si) \ { .type = IIO_TEMP, \ @@ -965,7 +969,7 @@ static const struct iio_info ad7195_info = { IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} static struct iio_chan_spec ad7192_channels[] = { AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0), diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c index fd1d855ff57a..506016f01593 100644 --- a/drivers/staging/iio/adc/ad7298_ring.c +++ b/drivers/staging/iio/adc/ad7298_ring.c @@ -76,7 +76,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ad7298_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; - s64 time_ns; + s64 time_ns = 0; __u16 buf[16]; int b_sent, i; diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c index 1ece2ac8de56..19ee49c95de4 100644 --- a/drivers/staging/iio/adc/ad7780.c +++ 
b/drivers/staging/iio/adc/ad7780.c @@ -131,9 +131,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_type = { - .sign = 's', + .sign = 'u', .realbits = 24, .storagebits = 32, .shift = 8, @@ -146,9 +147,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_type = { - .sign = 's', + .sign = 'u', .realbits = 20, .storagebits = 32, .shift = 12, diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c index 76fdd7145fc5..112e2b7b5bc4 100644 --- a/drivers/staging/iio/adc/ad7793.c +++ b/drivers/staging/iio/adc/ad7793.c @@ -563,8 +563,9 @@ static ssize_t ad7793_show_scale_available(struct device *dev, return len; } -static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available, - S_IRUGO, ad7793_show_scale_available, NULL, 0); +static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, + in_voltage-voltage_scale_available, S_IRUGO, + ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { &iio_dev_attr_sampling_frequency.dev_attr.attr, @@ -604,9 +605,6 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, *val = (smpl >> chan->scan_type.shift) & ((1 << (chan->scan_type.realbits)) - 1); - if (!unipolar) - *val -= (1 << (chan->scan_type.realbits - 1)); - return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -620,25 +618,38 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_NANO; } else { /* 1170mV / 2^23 * 6 */ - scale_uv = (1170ULL * 100000000ULL * 6ULL) - >> (chan->scan_type.realbits - - (unipolar ? 0 : 1)); + scale_uv = (1170ULL * 100000000ULL * 6ULL); } break; case IIO_TEMP: - /* Always uses unity gain and internal ref */ - scale_uv = (2500ULL * 100000000ULL) - >> (chan->scan_type.realbits - - (unipolar ? 0 : 1)); + /* 1170mV / 0.81 mV/C / 2^23 */ + scale_uv = 1444444444444ULL; break; default: return -EINVAL; } - *val2 = do_div(scale_uv, 100000000) * 10; - *val = scale_uv; - + scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1)); + *val = 0; + *val2 = scale_uv; return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_OFFSET: + if (!unipolar) + *val = -(1 << (chan->scan_type.realbits - 1)); + else + *val = 0; + + /* Kelvin to Celsius */ + if (chan->type == IIO_TEMP) { + unsigned long long offset; + unsigned int shift; + + shift = chan->scan_type.realbits - (unipolar ? 
0 : 1); + offset = 273ULL << shift; + do_div(offset, 1444); + *val -= offset; + } + return IIO_VAL_INT; } return -EINVAL; } @@ -676,7 +687,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, } ret = 0; } - + break; default: ret = -EINVAL; } @@ -720,9 +731,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 0, .address = AD7793_CH_AIN1P_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 0, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[1] = { .type = IIO_VOLTAGE, @@ -732,9 +744,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 1, .address = AD7793_CH_AIN2P_AIN2M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 1, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[2] = { .type = IIO_VOLTAGE, @@ -744,9 +757,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN3P_AIN3M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 2, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[3] = { .type = IIO_VOLTAGE, @@ -757,9 +771,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN1M_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 3, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[4] = { .type = IIO_TEMP, @@ -769,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, .scan_index = 4, - .scan_type = IIO_ST('s', 24, 32, 0), + .scan_type = IIO_ST('u', 24, 32, 0), }, .channel[5] = { .type = IIO_VOLTAGE, @@ -778,9 +793,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel = 4, .address = AD7793_CH_AVDD_MONITOR, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SEPARATE_BIT, + IIO_CHAN_INFO_SCALE_SEPARATE_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 5, - .scan_type = IIO_ST('s', 24, 32, 0), + .scan_type = IIO_ST('u', 24, 32, 0), }, .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6), }, @@ -793,9 +809,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 0, .address = AD7793_CH_AIN1P_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 0, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[1] = { .type = IIO_VOLTAGE, @@ -805,9 +822,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 1, .address = AD7793_CH_AIN2P_AIN2M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 1, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[2] = { .type = IIO_VOLTAGE, @@ -817,9 +835,10 @@ static const struct ad7793_chip_info 
ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN3P_AIN3M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 2, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[3] = { .type = IIO_VOLTAGE, @@ -830,9 +849,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN1M_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 3, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[4] = { .type = IIO_TEMP, @@ -842,7 +862,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, .scan_index = 4, - .scan_type = IIO_ST('s', 16, 32, 0), + .scan_type = IIO_ST('u', 16, 32, 0), }, .channel[5] = { .type = IIO_VOLTAGE, @@ -851,9 +871,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel = 4, .address = AD7793_CH_AVDD_MONITOR, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SEPARATE_BIT, + IIO_CHAN_INFO_SCALE_SEPARATE_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 5, - .scan_type = IIO_ST('s', 16, 32, 0), + .scan_type = IIO_ST('u', 16, 32, 0), }, .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6), }, @@ -901,7 +922,7 @@ static int __devinit ad7793_probe(struct spi_device *spi) else if (voltage_uv) st->int_vref_mv = voltage_uv / 1000; else - st->int_vref_mv = 2500; /* Build-in ref */ + st->int_vref_mv = 1170; /* Build-in ref */ spi_set_drvdata(spi, indio_dev); st->spi = spi; diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 992275c0d87c..2c4bd746715a 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -27,6 +27,7 @@ #include <linux/uaccess.h> #include <linux/ctype.h> #include <linux/reboot.h> +#include <linux/olpc-ec.h> #include <asm/tsc.h> #include <asm/olpc.h> diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index b06fd5b723fa..d536756549e6 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -189,7 +189,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode"); // Static vars definitions // -static struct usb_device_id vt6656_table[] __devinitdata = { +static struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index ef360547ecec..0ca857ac473e 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); -static const struct usb_device_id wb35_table[] __devinitconst = { +static const struct usb_device_id wb35_table[] = { { USB_DEVICE(0x0416, 0x0035) }, { USB_DEVICE(0x18E8, 0x6201) }, { USB_DEVICE(0x18E8, 0x6206) }, diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 9e2100551c78..cbb5aaf3e567 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -109,46 +109,29 @@ static struct se_device *fd_create_virtdevice( struct se_subsystem_dev *se_dev, void *p) { - char *dev_p = NULL; struct se_device *dev; struct se_dev_limits 
dev_limits; struct queue_limits *limits; struct fd_dev *fd_dev = p; struct fd_host *fd_host = hba->hba_ptr; - mm_segment_t old_fs; struct file *file; struct inode *inode = NULL; int dev_flags = 0, flags, ret = -EINVAL; memset(&dev_limits, 0, sizeof(struct se_dev_limits)); - old_fs = get_fs(); - set_fs(get_ds()); - dev_p = getname(fd_dev->fd_dev_name); - set_fs(old_fs); - - if (IS_ERR(dev_p)) { - pr_err("getname(%s) failed: %lu\n", - fd_dev->fd_dev_name, IS_ERR(dev_p)); - ret = PTR_ERR(dev_p); - goto fail; - } /* * Use O_DSYNC by default instead of O_SYNC to forgo syncing * of pure timestamp updates. */ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; - file = filp_open(dev_p, flags, 0600); + file = filp_open(fd_dev->fd_dev_name, flags, 0600); if (IS_ERR(file)) { - pr_err("filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name); ret = PTR_ERR(file); goto fail; } - if (!file || !file->f_dentry) { - pr_err("filp_open(%s) failed\n", dev_p); - goto fail; - } fd_dev->fd_file = file; /* * If using a block backend with this struct file, we extract @@ -212,14 +195,12 @@ static struct se_device *fd_create_virtdevice( " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); - putname(dev_p); return dev; fail: if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } - putname(dev_p); return ERR_PTR(ret); } @@ -452,14 +433,11 @@ static ssize_t fd_set_configfs_dev_params( token = match_token(ptr, tokens, args); switch (token) { case Opt_fd_dev_name: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; + if (match_strlcpy(fd_dev->fd_dev_name, &args[0], + FD_MAX_DEV_NAME) == 0) { + ret = -EINVAL; break; } - snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, - "%s", arg_p); - kfree(arg_p); pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 070b442c1f81..4720b4ba096a 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -160,10 +160,12 @@ config SERIAL_KS8695_CONSOLE config SERIAL_CLPS711X tristate "CLPS711X serial port support" - depends on ARM && ARCH_CLPS711X + depends on ARCH_CLPS711X select SERIAL_CORE + default y help - ::: To be written ::: + This enables the driver for the on-chip UARTs of the Cirrus + Logic EP711x/EP721x/EP731x processors. config SERIAL_CLPS711X_CONSOLE bool "Support for console on CLPS711X serial port" @@ -173,9 +175,7 @@ config SERIAL_CLPS711X_CONSOLE Even if you say Y here, the currently visible virtual console (/dev/tty0) will still be used as the system console by default, but you can alter that using a kernel command line option such as - "console=ttyCL1". (Try "man bootparam" or see the documentation of - your boot loader (lilo or loadlin) about how to pass options to the - kernel at boot time.) + "console=ttyCL1". 
config SERIAL_SAMSUNG tristate "Samsung SoC serial support" diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 144cd3987d4c..3ad079ffd049 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -1331,7 +1331,7 @@ static const struct spi_device_id ifx_id_table[] = { MODULE_DEVICE_TABLE(spi, ifx_id_table); /* spi operations */ -static const struct spi_driver ifx_spi_driver = { +static struct spi_driver ifx_spi_driver = { .driver = { .name = DRVNAME, .pm = &ifx_spi_pm, diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 2e341b81ff89..3a667eed63d6 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -73,6 +73,7 @@ #define AUART_CTRL0_CLKGATE (1 << 30) #define AUART_CTRL2_CTSEN (1 << 15) +#define AUART_CTRL2_RTSEN (1 << 14) #define AUART_CTRL2_RTS (1 << 11) #define AUART_CTRL2_RXE (1 << 9) #define AUART_CTRL2_TXE (1 << 8) @@ -259,9 +260,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) u32 ctrl = readl(u->membase + AUART_CTRL2); - ctrl &= ~AUART_CTRL2_RTS; - if (mctrl & TIOCM_RTS) - ctrl |= AUART_CTRL2_RTS; + ctrl &= ~AUART_CTRL2_RTSEN; + if (mctrl & TIOCM_RTS) { + if (u->state->port.flags & ASYNC_CTS_FLOW) + ctrl |= AUART_CTRL2_RTSEN; + } + s->ctrl = mctrl; writel(ctrl, u->membase + AUART_CTRL2); } @@ -359,9 +363,9 @@ static void mxs_auart_settermios(struct uart_port *u, /* figure out the hardware flow control settings */ if (cflag & CRTSCTS) - ctrl2 |= AUART_CTRL2_CTSEN; + ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN; else - ctrl2 &= ~AUART_CTRL2_CTSEN; + ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN); /* set baud rate */ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk); diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 654755a990df..333c8d012b0e 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -1348,10 +1348,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser) static int pmz_poll_get_char(struct uart_port *port) { struct uart_pmac_port *uap = (struct uart_pmac_port *)port; + int tries = 2; - while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) - udelay(5); - return read_zsdata(uap); + while (tries) { + if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0) + return read_zsdata(uap); + if (tries--) + udelay(5); + } + + return NO_POLL_CHAR; } static void pmz_poll_put_char(struct uart_port *port, unsigned char c) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index d4d8c9453cd8..9be296cf7295 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/errno.h> +#include <linux/sh_dma.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> @@ -1410,8 +1411,8 @@ static void work_fn_rx(struct work_struct *work) /* Handle incomplete DMA receive */ struct tty_struct *tty = port->state->port.tty; struct dma_chan *chan = s->chan_rx; - struct sh_desc *sh_desc = container_of(desc, struct sh_desc, - async_tx); + struct shdma_desc *sh_desc = container_of(desc, + struct shdma_desc, async_tx); unsigned long flags; int count; diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index a7773a3e02b1..7065df6036ca 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -13,7 +13,7 @@ config USB_ARCH_HAS_OHCI default y if PXA3xx default y if ARCH_EP93XX default y if ARCH_AT91 - default y if ARCH_PNX4008 && I2C + default y if ARCH_PNX4008 default y if 
MFD_TC6393XB default y if ARCH_W90X900 default y if ARCH_DAVINCI_DA8XX diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 8337fb5d988d..47e499c9c0b6 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig @@ -1,9 +1,9 @@ config USB_CHIPIDEA tristate "ChipIdea Highspeed Dual Role Controller" - depends on USB + depends on USB || USB_GADGET help - Say Y here if your system has a dual role high speed USB - controller based on ChipIdea silicon IP. Currently, only the + Say Y here if your system has a dual role high speed USB + controller based on ChipIdea silicon IP. Currently, only the peripheral mode is supported. When compiled dynamically, the module will be called ci-hdrc.ko. @@ -12,7 +12,7 @@ if USB_CHIPIDEA config USB_CHIPIDEA_UDC bool "ChipIdea device controller" - depends on USB_GADGET + depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA select USB_GADGET_DUALSPEED help Say Y here to enable device controller functionality of the @@ -20,6 +20,7 @@ config USB_CHIPIDEA_UDC config USB_CHIPIDEA_HOST bool "ChipIdea host controller" + depends on USB=y || USB=USB_CHIPIDEA select USB_EHCI_ROOT_HUB_TT help Say Y here to enable host controller functionality of the diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 56d6bf668488..f763ed7ba91e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1104,7 +1104,8 @@ skip_normal_probe: } - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) + if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || + control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; epctrl = &control_interface->cur_altsetting->endpoint[0].desc; diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index ee0ebacf8227..89dcf155d57e 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void) writel(FLAG_CF, &ehci_regs->configured_flag); /* Wait until the controller is no longer halted */ - loop = 10; + loop = 1000; do { status = readl(&ehci_regs->status); if (!(status & STS_HALT)) diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index ae8b18869b8c..8d9bcd8207c8 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -656,9 +656,8 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (!(filp->f_mode & FMODE_WRITE)) ro = 1; - if (filp->f_path.dentry) - inode = filp->f_path.dentry->d_inode; - if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { + inode = filp->f_path.dentry->d_inode; + if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } @@ -667,7 +666,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) * If we can't read the file, it's no good. * If we can't write the file, use it read-only. 
*/ - if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) { + if (!(filp->f_op->read || filp->f_op->aio_read)) { LINFO(curlun, "file not readable: %s\n", filename); goto out; } @@ -712,7 +711,6 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (fsg_lun_is_open(curlun)) fsg_lun_close(curlun); - get_file(filp); curlun->blksize = blksize; curlun->blkbits = blkbits; curlun->ro = ro; @@ -720,10 +718,10 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) curlun->file_length = size; curlun->num_sectors = num_sectors; LDBG(curlun, "open backing file: %s\n", filename); - rc = 0; + return 0; out: - filp_close(filp, current->files); + fput(filp); return rc; } diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 90e82e288eb9..0e5230926154 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net) spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { struct gether *link = dev->port_usb; + const struct usb_endpoint_descriptor *in; + const struct usb_endpoint_descriptor *out; if (link->close) link->close(link); @@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net) * their own pace; the network stack can handle old packets. * For the moment we leave this here, since it works. */ + in = link->in_ep->desc; + out = link->out_ep->desc; usb_ep_disable(link->in_ep); usb_ep_disable(link->out_ep); if (netif_carrier_ok(net)) { DBG(dev, "host still using in/out endpoints\n"); + link->in_ep->desc = in; + link->out_ep->desc = out; usb_ep_enable(link->in_ep); usb_ep_enable(link->out_ep); } diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c index af9898982059..e0c5e88e03ed 100644 --- a/drivers/usb/gadget/u_uac1.c +++ b/drivers/usb/gadget/u_uac1.c @@ -275,17 +275,17 @@ static int gaudio_close_snd_dev(struct gaudio *gau) /* Close control device */ snd = &gau->control; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM playback device and setup substream */ snd = &gau->playback; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM capture device and setup substream */ snd = &gau->capture; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); return 0; } diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index bb55eb4a7d48..d7fe287d0678 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -56,15 +56,6 @@ #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 -/* Errata i693 */ -static struct clk *utmi_p1_fck; -static struct clk *utmi_p2_fck; -static struct clk *xclk60mhsp1_ck; -static struct clk *xclk60mhsp2_ck; -static struct clk *usbhost_p1_fck; -static struct clk *usbhost_p2_fck; -static struct clk *init_60m_fclk; - /*-------------------------------------------------------------------------*/ static const struct hc_driver ehci_omap_hc_driver; @@ -80,40 +71,6 @@ static inline u32 ehci_read(void __iomem *base, u32 reg) return __raw_readl(base + reg); } -/* Erratum i693 workaround sequence */ -static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) -{ - int ret = 0; - - /* Switch to the internal 60 MHz clock */ - ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); - if (ret != 0) - ehci_err(ehci, "init_60m_fclk set parent" - "failed error:%d\n", ret); - - ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); - if (ret 
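The u_ether change above saves the endpoint descriptors before usb_ep_disable() and puts them back before re-enabling, since disabling an endpoint can clear the descriptor pointer that the enable path relies on. The general shape of that save/restore pattern, sketched with hypothetical types rather than the gadget API:

    /* Illustrative only: when a teardown helper clears a field that a later
     * re-init needs, capture it first and restore it before re-enabling. */
    struct endpoint { const void *desc; };

    static void ep_disable(struct endpoint *ep) { ep->desc = 0; }
    static int  ep_enable(struct endpoint *ep)  { return ep->desc ? 0 : -1; }

    int pause_and_resume(struct endpoint *in, struct endpoint *out)
    {
        const void *in_desc = in->desc;     /* save before disable clears them */
        const void *out_desc = out->desc;

        ep_disable(in);
        ep_disable(out);

        in->desc = in_desc;                 /* restore so enable can succeed   */
        out->desc = out_desc;
        return ep_enable(in) ? -1 : ep_enable(out);
    }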
!= 0) - ehci_err(ehci, "init_60m_fclk set parent" - "failed error:%d\n", ret); - - clk_enable(usbhost_p1_fck); - clk_enable(usbhost_p2_fck); - - /* Wait 1ms and switch back to the external clock */ - mdelay(1); - ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); - if (ret != 0) - ehci_err(ehci, "xclk60mhsp1_ck set parent" - "failed error:%d\n", ret); - - ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); - if (ret != 0) - ehci_err(ehci, "xclk60mhsp2_ck set parent" - "failed error:%d\n", ret); - - clk_disable(usbhost_p1_fck); - clk_disable(usbhost_p2_fck); -} static void omap_ehci_soft_phy_reset(struct usb_hcd *hcd, u8 port) { @@ -195,50 +152,6 @@ static int omap_ehci_init(struct usb_hcd *hcd) return rc; } -static int omap_ehci_hub_control( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength -) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - u32 __iomem *status_reg = &ehci->regs->port_status[ - (wIndex & 0xff) - 1]; - u32 temp; - unsigned long flags; - int retval = 0; - - spin_lock_irqsave(&ehci->lock, flags); - - if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { - temp = ehci_readl(ehci, status_reg); - if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { - retval = -EPIPE; - goto done; - } - - temp &= ~PORT_WKCONN_E; - temp |= PORT_WKDISC_E | PORT_WKOC_E; - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); - - omap_ehci_erratum_i693(ehci); - - set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); - goto done; - } - - spin_unlock_irqrestore(&ehci->lock, flags); - - /* Handle the hub control events here */ - return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); -done: - spin_unlock_irqrestore(&ehci->lock, flags); - return retval; -} - static void disable_put_regulator( struct ehci_hcd_omap_platform_data *pdata) { @@ -351,79 +264,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) goto err_pm_runtime; } - /* get clocks */ - utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); - if (IS_ERR(utmi_p1_fck)) { - ret = PTR_ERR(utmi_p1_fck); - dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); - goto err_add_hcd; - } - - xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); - if (IS_ERR(xclk60mhsp1_ck)) { - ret = PTR_ERR(xclk60mhsp1_ck); - dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); - goto err_utmi_p1_fck; - } - - utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); - if (IS_ERR(utmi_p2_fck)) { - ret = PTR_ERR(utmi_p2_fck); - dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); - goto err_xclk60mhsp1_ck; - } - - xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); - if (IS_ERR(xclk60mhsp2_ck)) { - ret = PTR_ERR(xclk60mhsp2_ck); - dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); - goto err_utmi_p2_fck; - } - - usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); - if (IS_ERR(usbhost_p1_fck)) { - ret = PTR_ERR(usbhost_p1_fck); - dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); - goto err_xclk60mhsp2_ck; - } - - usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); - if (IS_ERR(usbhost_p2_fck)) { - ret = PTR_ERR(usbhost_p2_fck); - dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); - goto err_usbhost_p1_fck; - } - - init_60m_fclk = clk_get(dev, "init_60m_fclk"); - if (IS_ERR(init_60m_fclk)) { - ret = PTR_ERR(init_60m_fclk); - dev_err(dev, "init_60m_fclk failed error:%d\n", ret); - goto err_usbhost_p2_fck; - } return 0; -err_usbhost_p2_fck: - clk_put(usbhost_p2_fck); - -err_usbhost_p1_fck: - clk_put(usbhost_p1_fck); - -err_xclk60mhsp2_ck: - clk_put(xclk60mhsp2_ck); - -err_utmi_p2_fck: - 
clk_put(utmi_p2_fck); - -err_xclk60mhsp1_ck: - clk_put(xclk60mhsp1_ck); - -err_utmi_p1_fck: - clk_put(utmi_p1_fck); - -err_add_hcd: - usb_remove_hcd(hcd); - err_pm_runtime: disable_put_regulator(pdata); pm_runtime_put_sync(dev); @@ -454,14 +297,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) iounmap(hcd->regs); usb_put_hcd(hcd); - clk_put(utmi_p1_fck); - clk_put(utmi_p2_fck); - clk_put(xclk60mhsp1_ck); - clk_put(xclk60mhsp2_ck); - clk_put(usbhost_p1_fck); - clk_put(usbhost_p2_fck); - clk_put(init_60m_fclk); - pm_runtime_put_sync(dev); pm_runtime_disable(dev); @@ -532,7 +367,7 @@ static const struct hc_driver ehci_omap_hc_driver = { * root hub support */ .hub_status_data = ehci_hub_status_data, - .hub_control = omap_ehci_hub_control, + .hub_control = ehci_hub_control, .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c index 58c96bd50d22..0c9e43cfaff5 100644 --- a/drivers/usb/host/ehci-sead3.c +++ b/drivers/usb/host/ehci-sead3.c @@ -40,7 +40,7 @@ static int ehci_sead3_setup(struct usb_hcd *hcd) ehci->need_io_watchdog = 0; /* Set burst length to 16 words. */ - ehci_writel(ehci, 0x1010, &ehci->regs->reserved[1]); + ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]); return ret; } diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 950e95efa381..26dedb30ad0b 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -799,11 +799,12 @@ static int tegra_ehci_remove(struct platform_device *pdev) #endif usb_remove_hcd(hcd); - usb_put_hcd(hcd); tegra_usb_phy_close(tegra->phy); iounmap(hcd->regs); + usb_put_hcd(hcd); + clk_disable_unprepare(tegra->clk); clk_put(tegra->clk); diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 2ed112d3e159..256326322cfd 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -543,12 +543,12 @@ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, short_ok ? 
"" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); + /* save the data underrun error code for later and + * proceed with the status stage + */ + urb->actual_length += PTD_GET_COUNT(ptd); if (usb_pipecontrol(urb->pipe)) { ep->nextpid = USB_PID_ACK; - /* save the data underrun error code for later and - * proceed with the status stage - */ - urb->actual_length += PTD_GET_COUNT(ptd); BUG_ON(urb->actual_length > urb->transfer_buffer_length); if (urb->status == -EINPROGRESS) diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index e7d75d295988..f8b2d91851f7 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -403,8 +403,6 @@ err0: static inline void usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev) { - struct ohci_hcd *ohci = hcd_to_ohci (hcd); - usb_remove_hcd(hcd); if (!IS_ERR_OR_NULL(hcd->phy)) { (void) otg_set_host(hcd->phy->otg, 0); diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index df0828cb2aa3..c5e9e4a76f14 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -800,6 +800,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) } EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) +{ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0); + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0); +} +EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); + /** * PCI Quirks for xHCI. * diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b1002a8ef96f..ef004a5de20f 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -10,6 +10,7 @@ void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); #else static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 18b231b0c5d3..9bfd4ca1153c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -94,11 +94,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; + /* + * PPT desktop boards DH77EB and DH77DF will power back on after + * a few seconds of being shutdown. The fix for this is to + * switch the ports from xHCI to EHCI on shutdown. We can't use + * DMI information to find those particular boards (since each + * vendor will change the board name), so we have to key off all + * PPT chipsets. 
+ */ + xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); + xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 8275645889da..643c2f3f3e73 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -145,29 +145,37 @@ static void next_trb(struct xhci_hcd *xhci, */ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) { - union xhci_trb *next; unsigned long long addr; ring->deq_updates++; - /* If this is not event ring, there is one more usable TRB */ + /* + * If this is not event ring, and the dequeue pointer + * is not on a link TRB, there is one more usable TRB + */ if (ring->type != TYPE_EVENT && !last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) ring->num_trbs_free++; - next = ++(ring->dequeue); - /* Update the dequeue pointer further if that was a link TRB or we're at - * the end of an event ring segment (which doesn't have link TRBS) - */ - while (last_trb(xhci, ring, ring->deq_seg, next)) { - if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci, - ring, ring->deq_seg, next)) { - ring->cycle_state = (ring->cycle_state ? 0 : 1); + do { + /* + * Update the dequeue pointer further if that was a link TRB or + * we're at the end of an event ring segment (which doesn't have + * link TRBS) + */ + if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { + if (ring->type == TYPE_EVENT && + last_trb_on_last_seg(xhci, ring, + ring->deq_seg, ring->dequeue)) { + ring->cycle_state = (ring->cycle_state ? 0 : 1); + } + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + } else { + ring->dequeue++; } - ring->deq_seg = ring->deq_seg->next; - ring->dequeue = ring->deq_seg->trbs; - next = ring->dequeue; - } + } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); + addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); } @@ -2073,8 +2081,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (xhci->quirks & XHCI_TRUST_TX_LENGTH) trb_comp_code = COMP_SHORT_TX; else - xhci_warn(xhci, "WARN Successful completion on short TX: " - "needs XHCI_TRUST_TX_LENGTH quirk?\n"); + xhci_warn_ratelimited(xhci, + "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); case COMP_SHORT_TX: break; case COMP_STOP: diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7648b2d4b268..c59d5b5b6c7d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -166,7 +166,7 @@ int xhci_reset(struct xhci_hcd *xhci) xhci_writel(xhci, command, &xhci->op_regs->command); ret = handshake(xhci, &xhci->op_regs->command, - CMD_RESET, 0, 250 * 1000); + CMD_RESET, 0, 10 * 1000 * 1000); if (ret) return ret; @@ -175,7 +175,8 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
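The inc_deq() rework above keeps looping until the dequeue pointer rests on a normal TRB, instead of advancing once and then skipping whatever link TRB it landed on. The same idea, a ring built from fixed-size segments chained through a link slot in the last entry, can be sketched with generic structures (these are not the xHCI ones):

    #include <stddef.h>

    #define SEG_SIZE 16

    struct seg {
        int slots[SEG_SIZE];    /* slots[SEG_SIZE - 1] acts as the link entry */
        struct seg *next;       /* next segment in the ring                   */
    };

    struct ring {
        struct seg *deq_seg;
        size_t deq_idx;         /* index of the current dequeue slot          */
    };

    /* Advance the dequeue pointer one slot, hopping over the link entry at
     * the end of a segment so the pointer never rests on it. */
    void ring_inc_deq(struct ring *r)
    {
        r->deq_idx++;
        while (r->deq_idx == SEG_SIZE - 1) {    /* landed on a link slot */
            r->deq_seg = r->deq_seg->next;
            r->deq_idx = 0;
        }
    }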
*/ - ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); + ret = handshake(xhci, &xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); for (i = 0; i < 2; ++i) { xhci->bus_state[i].port_c_suspend = 0; @@ -658,6 +659,9 @@ void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + if (xhci->quirks && XHCI_SPURIOUS_REBOOT) + usb_disable_xhci_ports(to_pci_dev(hcd->self.controller)); + spin_lock_irq(&xhci->lock); xhci_halt(xhci); spin_unlock_irq(&xhci->lock); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 55c0785810c9..c713256297ac 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1494,6 +1494,7 @@ struct xhci_hcd { #define XHCI_TRUST_TX_LENGTH (1 << 10) #define XHCI_LPM_SUPPORT (1 << 11) #define XHCI_INTEL_HOST (1 << 12) +#define XHCI_SPURIOUS_REBOOT (1 << 13) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1537,6 +1538,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_warn(xhci, fmt, args...) \ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_warn_ratelimited(xhci, fmt, args...) \ + dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) /* TODO: copied from ehci.h - can be refactored? */ /* xHCI spec says all registers are little endian */ diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index ff08015b230c..ae794b90766b 100644 --- a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c @@ -232,7 +232,7 @@ wraperr: return err; } -static const struct usb_device_id id_table[] __devinitconst = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index ef0c3f9f0947..6259f0d99324 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -8,7 +8,7 @@ config USB_MUSB_HDRC tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' depends on USB && USB_GADGET select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) - select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX) + select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX) select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS @@ -57,7 +57,7 @@ config USB_MUSB_AM35X config USB_MUSB_DSPS tristate "TI DSPS platforms" - depends on SOC_OMAPTI81XX || SOC_OMAPAM33XX + depends on SOC_TI81XX || SOC_AM33XX config USB_MUSB_BLACKFIN tristate "Blackfin" diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 217808d9fbe1..494772fc9e23 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -479,9 +479,9 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) ret = -ENODEV; goto err0; } - strcpy((u8 *)res->name, "mc"); res->parent = NULL; resources[1] = *res; + resources[1].name = "mc"; /* allocate the child platform device */ musb = platform_device_alloc("musb-hdrc", -1); @@ -566,27 +566,28 @@ static int __devinit dsps_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, glue); - /* create the child platform device for first instances of musb */ - ret = dsps_create_musb_pdev(glue, 0); - if (ret != 0) { - dev_err(&pdev->dev, "failed to create child pdev\n"); - goto err2; - } - /* enable the usbss clocks */ 
pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); + goto err2; + } + + /* create the child platform device for first instances of musb */ + ret = dsps_create_musb_pdev(glue, 0); + if (ret != 0) { + dev_err(&pdev->dev, "failed to create child pdev\n"); goto err3; } return 0; err3: - pm_runtime_disable(&pdev->dev); + pm_runtime_put(&pdev->dev); err2: + pm_runtime_disable(&pdev->dev); kfree(glue->wrp); err1: kfree(glue); diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8c9bb1ad3069..681da06170c2 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -603,12 +603,12 @@ static int usbhsc_resume(struct device *dev) struct usbhs_priv *priv = dev_get_drvdata(dev); struct platform_device *pdev = usbhs_priv_to_pdev(priv); - usbhs_platform_call(priv, phy_reset, pdev); - if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) usbhsc_power_ctrl(priv, 1); - usbhsc_hotplug(priv); + usbhs_platform_call(priv, phy_reset, pdev); + + usbhsc_drvcllbck_notify_hotplug(pdev); return 0; } diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c index 1834cf50888c..9b69a1323294 100644 --- a/drivers/usb/renesas_usbhs/mod_host.c +++ b/drivers/usb/renesas_usbhs/mod_host.c @@ -1266,6 +1266,12 @@ static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, return ret; } +static int usbhsh_bus_nop(struct usb_hcd *hcd) +{ + /* nothing to do */ + return 0; +} + static struct hc_driver usbhsh_driver = { .description = usbhsh_hcd_name, .hcd_priv_size = sizeof(struct usbhsh_hpriv), @@ -1290,6 +1296,8 @@ static struct hc_driver usbhsh_driver = { */ .hub_status_data = usbhsh_hub_status_data, .hub_control = usbhsh_hub_control, + .bus_suspend = usbhsh_bus_nop, + .bus_resume = usbhsh_bus_nop, }; /* diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c index f398d1e34474..c15f2e7cefc7 100644 --- a/drivers/usb/serial/bus.c +++ b/drivers/usb/serial/bus.c @@ -61,18 +61,23 @@ static int usb_serial_device_probe(struct device *dev) goto exit; } + /* make sure suspend/resume doesn't race against port_probe */ + retval = usb_autopm_get_interface(port->serial->interface); + if (retval) + goto exit; + driver = port->serial->type; if (driver->port_probe) { retval = driver->port_probe(port); if (retval) - goto exit; + goto exit_with_autopm; } retval = device_create_file(dev, &dev_attr_port_number); if (retval) { if (driver->port_remove) retval = driver->port_remove(port); - goto exit; + goto exit_with_autopm; } minor = port->number; @@ -81,6 +86,8 @@ static int usb_serial_device_probe(struct device *dev) "%s converter now attached to ttyUSB%d\n", driver->description, minor); +exit_with_autopm: + usb_autopm_put_interface(port->serial->interface); exit: return retval; } @@ -96,6 +103,9 @@ static int usb_serial_device_remove(struct device *dev) if (!port) return -ENODEV; + /* make sure suspend/resume doesn't race against port_remove */ + usb_autopm_get_interface(port->serial->interface); + device_remove_file(&port->dev, &dev_attr_port_number); driver = port->serial->type; @@ -107,6 +117,7 @@ static int usb_serial_device_remove(struct device *dev) dev_info(dev, "%s converter now disconnected from ttyUSB%d\n", driver->description, minor); + usb_autopm_put_interface(port->serial->interface); return retval; } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index bc912e5a3beb..5620db6469e5 100644 --- 
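The musb_dsps probe reordering above follows the usual acquire-in-order, unwind-in-reverse discipline: runtime PM is brought up before the child device is created, and the error labels are shuffled so each one releases only what succeeded before the failure. A compact sketch of that pattern, using hypothetical resource names that stand in for the pm_runtime and child-device steps:

    /* Hypothetical acquire/release pairs; each stub simply succeeds. */
    static int  acquire_clock(void)  { return 0; }
    static void release_clock(void)  { }
    static int  acquire_pm_ref(void) { return 0; }
    static void release_pm_ref(void) { }
    static int  create_child(void)   { return 0; }

    int probe_example(void)
    {
        int ret;

        ret = acquire_clock();          /* step 1 */
        if (ret)
            goto err_none;

        ret = acquire_pm_ref();         /* step 2 */
        if (ret)
            goto err_clock;

        ret = create_child();           /* step 3: only after PM is up */
        if (ret)
            goto err_pm_ref;

        return 0;

    err_pm_ref:                         /* undo step 2 */
        release_pm_ref();
    err_clock:                          /* undo step 1 */
        release_clock();
    err_none:
        return ret;
    }

Moving a step earlier or later means renaming the labels so the unwind path still mirrors the acquisitions exactly, which is what the renumbered err2/err3 targets in the hunk above do.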
a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -811,6 +811,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, { USB_DEVICE(PI_VID, PI_E861_PID) }, + { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5661c7e2d415..5dd96ca6c380 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -795,6 +795,13 @@ #define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */ /* + * Kondo Kagaku Co.Ltd. + * http://www.kondo-robot.com/EN + */ +#define KONDO_VID 0x165c +#define KONDO_USB_SERIAL_PID 0x0002 + +/* * Bayer Ascensia Contour blood glucose meter USB-converter cable. * http://winglucofacts.com/cables/ */ diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c index 5811d34b6c6b..2cb30c535839 100644 --- a/drivers/usb/serial/ipw.c +++ b/drivers/usb/serial/ipw.c @@ -227,7 +227,6 @@ static void ipw_release(struct usb_serial *serial) { struct usb_wwan_intf_private *data = usb_get_serial_data(serial); - usb_wwan_release(serial); usb_set_serial_data(serial, NULL); kfree(data); } @@ -309,12 +308,12 @@ static struct usb_serial_driver ipw_device = { .description = "IPWireless converter", .id_table = id_table, .num_ports = 1, - .disconnect = usb_wwan_disconnect, .open = ipw_open, .close = ipw_close, .probe = ipw_probe, .attach = usb_wwan_startup, .release = ipw_release, + .port_remove = usb_wwan_port_remove, .dtr_rts = ipw_dtr_rts, .write = usb_wwan_write, }; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 57eca2448424..2f6da1e89bfa 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -82,8 +82,7 @@ * Defines used for sending commands to port */ -#define WAIT_FOR_EVER (HZ * 0) /* timeout urb is wait for ever */ -#define MOS_WDR_TIMEOUT (HZ * 5) /* default urb timeout */ +#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */ #define MOS_PORT1 0x0200 #define MOS_PORT2 0x0300 @@ -1232,9 +1231,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty) return 0; spin_lock_irqsave(&mos7840_port->pool_lock, flags); - for (i = 0; i < NUM_URBS; ++i) - if (mos7840_port->busy[i]) - chars += URB_TRANSFER_BUFFER_SIZE; + for (i = 0; i < NUM_URBS; ++i) { + if (mos7840_port->busy[i]) { + struct urb *urb = mos7840_port->write_urb_pool[i]; + chars += urb->transfer_buffer_length; + } + } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); dbg("%s - returns %d", __func__, chars); return chars; @@ -1344,7 +1346,7 @@ static void mos7840_close(struct usb_serial_port *port) static void mos7840_block_until_chase_response(struct tty_struct *tty, struct moschip_port *mos7840_port) { - int timeout = 1 * HZ; + int timeout = msecs_to_jiffies(1000); int wait = 10; int count; @@ -2672,7 +2674,7 @@ static int mos7840_startup(struct usb_serial *serial) /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); + (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT); return 0; error: for (/* nothing */; i >= 0; i--) { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 08ff9b862049..cc40f47ecea1 100644 --- 
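The mos7840 hunk above is a unit fix: usb_control_msg() takes its timeout in milliseconds, while scheduler-based waits count in jiffies, so the constant is now kept in milliseconds and converted with msecs_to_jiffies() only where a tick count is needed. A small stand-alone sketch of keeping the two units separate (TICKS_PER_SEC stands in for HZ; the values are illustrative):

    #include <stdio.h>

    #define TICKS_PER_SEC   250         /* assumed tick rate for the example */
    #define IO_TIMEOUT_MS   5000        /* one constant, always milliseconds */

    /* Round up, the way a ms-to-ticks conversion typically does. */
    static unsigned long ms_to_ticks(unsigned int ms)
    {
        return ((unsigned long)ms * TICKS_PER_SEC + 999) / 1000;
    }

    int main(void)
    {
        /* A millisecond-based API gets the value unchanged ...   */
        printf("control message timeout: %d ms\n", IO_TIMEOUT_MS);
        /* ... a tick-based wait gets the converted value.        */
        printf("wait timeout: %lu ticks\n", ms_to_ticks(IO_TIMEOUT_MS));
        return 0;
    }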
a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -80,85 +80,9 @@ static void option_instat_callback(struct urb *urb); #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 -#define HUAWEI_PRODUCT_E600 0x1001 -#define HUAWEI_PRODUCT_E220 0x1003 -#define HUAWEI_PRODUCT_E220BIS 0x1004 -#define HUAWEI_PRODUCT_E1401 0x1401 -#define HUAWEI_PRODUCT_E1402 0x1402 -#define HUAWEI_PRODUCT_E1403 0x1403 -#define HUAWEI_PRODUCT_E1404 0x1404 -#define HUAWEI_PRODUCT_E1405 0x1405 -#define HUAWEI_PRODUCT_E1406 0x1406 -#define HUAWEI_PRODUCT_E1407 0x1407 -#define HUAWEI_PRODUCT_E1408 0x1408 -#define HUAWEI_PRODUCT_E1409 0x1409 -#define HUAWEI_PRODUCT_E140A 0x140A -#define HUAWEI_PRODUCT_E140B 0x140B -#define HUAWEI_PRODUCT_E140C 0x140C -#define HUAWEI_PRODUCT_E140D 0x140D -#define HUAWEI_PRODUCT_E140E 0x140E -#define HUAWEI_PRODUCT_E140F 0x140F -#define HUAWEI_PRODUCT_E1410 0x1410 -#define HUAWEI_PRODUCT_E1411 0x1411 -#define HUAWEI_PRODUCT_E1412 0x1412 -#define HUAWEI_PRODUCT_E1413 0x1413 -#define HUAWEI_PRODUCT_E1414 0x1414 -#define HUAWEI_PRODUCT_E1415 0x1415 -#define HUAWEI_PRODUCT_E1416 0x1416 -#define HUAWEI_PRODUCT_E1417 0x1417 -#define HUAWEI_PRODUCT_E1418 0x1418 -#define HUAWEI_PRODUCT_E1419 0x1419 -#define HUAWEI_PRODUCT_E141A 0x141A -#define HUAWEI_PRODUCT_E141B 0x141B -#define HUAWEI_PRODUCT_E141C 0x141C -#define HUAWEI_PRODUCT_E141D 0x141D -#define HUAWEI_PRODUCT_E141E 0x141E -#define HUAWEI_PRODUCT_E141F 0x141F -#define HUAWEI_PRODUCT_E1420 0x1420 -#define HUAWEI_PRODUCT_E1421 0x1421 -#define HUAWEI_PRODUCT_E1422 0x1422 -#define HUAWEI_PRODUCT_E1423 0x1423 -#define HUAWEI_PRODUCT_E1424 0x1424 -#define HUAWEI_PRODUCT_E1425 0x1425 -#define HUAWEI_PRODUCT_E1426 0x1426 -#define HUAWEI_PRODUCT_E1427 0x1427 -#define HUAWEI_PRODUCT_E1428 0x1428 -#define HUAWEI_PRODUCT_E1429 0x1429 -#define HUAWEI_PRODUCT_E142A 0x142A -#define HUAWEI_PRODUCT_E142B 0x142B -#define HUAWEI_PRODUCT_E142C 0x142C -#define HUAWEI_PRODUCT_E142D 0x142D -#define HUAWEI_PRODUCT_E142E 0x142E -#define HUAWEI_PRODUCT_E142F 0x142F -#define HUAWEI_PRODUCT_E1430 0x1430 -#define HUAWEI_PRODUCT_E1431 0x1431 -#define HUAWEI_PRODUCT_E1432 0x1432 -#define HUAWEI_PRODUCT_E1433 0x1433 -#define HUAWEI_PRODUCT_E1434 0x1434 -#define HUAWEI_PRODUCT_E1435 0x1435 -#define HUAWEI_PRODUCT_E1436 0x1436 -#define HUAWEI_PRODUCT_E1437 0x1437 -#define HUAWEI_PRODUCT_E1438 0x1438 -#define HUAWEI_PRODUCT_E1439 0x1439 -#define HUAWEI_PRODUCT_E143A 0x143A -#define HUAWEI_PRODUCT_E143B 0x143B -#define HUAWEI_PRODUCT_E143C 0x143C -#define HUAWEI_PRODUCT_E143D 0x143D -#define HUAWEI_PRODUCT_E143E 0x143E -#define HUAWEI_PRODUCT_E143F 0x143F #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 -#define HUAWEI_PRODUCT_E14AC 0x14AC -#define HUAWEI_PRODUCT_K3806 0x14AE #define HUAWEI_PRODUCT_K4605 0x14C6 -#define HUAWEI_PRODUCT_K5005 0x14C8 -#define HUAWEI_PRODUCT_K3770 0x14C9 -#define HUAWEI_PRODUCT_K3771 0x14CA -#define HUAWEI_PRODUCT_K4510 0x14CB -#define HUAWEI_PRODUCT_K4511 0x14CC -#define HUAWEI_PRODUCT_ETS1220 0x1803 -#define HUAWEI_PRODUCT_E353 0x1506 -#define HUAWEI_PRODUCT_E173S 0x1C05 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -615,104 +539,123 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, - { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, - { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */ + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
0xff, 0x01, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) }, + { 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, + + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -943,6 +886,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 
0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -1297,8 +1242,8 @@ static struct usb_serial_driver option_1port_device = { .tiocmset = usb_wwan_tiocmset, .ioctl = usb_wwan_ioctl, .attach = usb_wwan_startup, - .disconnect = usb_wwan_disconnect, .release = option_release, + .port_remove = usb_wwan_port_remove, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, @@ -1414,8 +1359,6 @@ static void option_release(struct usb_serial *serial) struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct option_private *priv = intfdata->private; - usb_wwan_release(serial); - kfree(priv); kfree(intfdata); } diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 8d103019d6aa..bfd50779f0c9 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -199,43 +199,49 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) /* default to enabling interface */ altsetting = 0; - switch (ifnum) { - /* Composite mode; don't bind to the QMI/net interface as that - * gets handled by other drivers. - */ + /* Composite mode; don't bind to the QMI/net interface as that + * gets handled by other drivers. + */ + + if (is_gobi1k) { /* Gobi 1K USB layout: * 0: serial port (doesn't respond) * 1: serial port (doesn't respond) * 2: AT-capable modem port * 3: QMI/net - * - * Gobi 2K+ USB layout: + */ + if (ifnum == 2) + dev_dbg(dev, "Modem port found\n"); + else + altsetting = -1; + } else { + /* Gobi 2K+ USB layout: * 0: QMI/net * 1: DM/DIAG (use libqcdm from ModemManager for communication) * 2: AT-capable modem port * 3: NMEA */ - - case 1: - if (is_gobi1k) + switch (ifnum) { + case 0: + /* Don't claim the QMI/net interface */ altsetting = -1; - else + break; + case 1: dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n"); - break; - case 2: - dev_dbg(dev, "Modem port found\n"); - break; - case 3: - if (is_gobi1k) - altsetting = -1; - else + break; + case 2: + dev_dbg(dev, "Modem port found\n"); + break; + case 3: /* * NMEA (serial line 9600 8N1) * # echo "\$GPS_START" > /dev/ttyUSBx * # echo "\$GPS_STOP" > /dev/ttyUSBx */ dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n"); + break; + } } done: @@ -262,8 +268,7 @@ static void qc_release(struct usb_serial *serial) { struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); - /* Call usb_wwan release & free the private data allocated in qcprobe */ - usb_wwan_release(serial); + /* Free the private data allocated in qcprobe */ usb_set_serial_data(serial, NULL); kfree(priv); } @@ -283,8 +288,8 @@ static struct usb_serial_driver qcdevice = { .write_room = usb_wwan_write_room, .chars_in_buffer = usb_wwan_chars_in_buffer, .attach = usb_wwan_startup, - .disconnect = usb_wwan_disconnect, .release = qc_release, + .port_remove = usb_wwan_port_remove, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h index c47b6ec03063..1f034d2397c6 100644 --- a/drivers/usb/serial/usb-wwan.h +++ b/drivers/usb/serial/usb-wwan.h @@ -9,8 +9,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); extern void usb_wwan_close(struct 
usb_serial_port *port); extern int usb_wwan_startup(struct usb_serial *serial); -extern void usb_wwan_disconnect(struct usb_serial *serial); -extern void usb_wwan_release(struct usb_serial *serial); +extern int usb_wwan_port_remove(struct usb_serial_port *port); extern int usb_wwan_write_room(struct tty_struct *tty); extern void usb_wwan_set_termios(struct tty_struct *tty, struct usb_serial_port *port, diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index f35971dff4a5..6855d5ed0331 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -565,62 +565,52 @@ bail_out_error: } EXPORT_SYMBOL(usb_wwan_startup); -static void stop_read_write_urbs(struct usb_serial *serial) +int usb_wwan_port_remove(struct usb_serial_port *port) { - int i, j; - struct usb_serial_port *port; + int i; struct usb_wwan_port_private *portdata; - /* Stop reading/writing urbs */ - for (i = 0; i < serial->num_ports; ++i) { - port = serial->port[i]; - portdata = usb_get_serial_port_data(port); - for (j = 0; j < N_IN_URB; j++) - usb_kill_urb(portdata->in_urbs[j]); - for (j = 0; j < N_OUT_URB; j++) - usb_kill_urb(portdata->out_urbs[j]); + portdata = usb_get_serial_port_data(port); + usb_set_serial_port_data(port, NULL); + + /* Stop reading/writing urbs and free them */ + for (i = 0; i < N_IN_URB; i++) { + usb_kill_urb(portdata->in_urbs[i]); + usb_free_urb(portdata->in_urbs[i]); + free_page((unsigned long)portdata->in_buffer[i]); + } + for (i = 0; i < N_OUT_URB; i++) { + usb_kill_urb(portdata->out_urbs[i]); + usb_free_urb(portdata->out_urbs[i]); + kfree(portdata->out_buffer[i]); } -} -void usb_wwan_disconnect(struct usb_serial *serial) -{ - stop_read_write_urbs(serial); + /* Now free port private data */ + kfree(portdata); + return 0; } -EXPORT_SYMBOL(usb_wwan_disconnect); +EXPORT_SYMBOL(usb_wwan_port_remove); -void usb_wwan_release(struct usb_serial *serial) +#ifdef CONFIG_PM +static void stop_read_write_urbs(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; - /* Now free them */ + /* Stop reading/writing urbs */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); - - for (j = 0; j < N_IN_URB; j++) { - usb_free_urb(portdata->in_urbs[j]); - free_page((unsigned long) - portdata->in_buffer[j]); - portdata->in_urbs[j] = NULL; - } - for (j = 0; j < N_OUT_URB; j++) { - usb_free_urb(portdata->out_urbs[j]); - kfree(portdata->out_buffer[j]); - portdata->out_urbs[j] = NULL; - } - } - - /* Now free per port private data */ - for (i = 0; i < serial->num_ports; i++) { - port = serial->port[i]; - kfree(usb_get_serial_port_data(port)); + if (!portdata) + continue; + for (j = 0; j < N_IN_URB; j++) + usb_kill_urb(portdata->in_urbs[j]); + for (j = 0; j < N_OUT_URB; j++) + usb_kill_urb(portdata->out_urbs[j]); } } -EXPORT_SYMBOL(usb_wwan_release); -#ifdef CONFIG_PM int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_wwan_intf_private *intfdata = serial->private; @@ -712,7 +702,7 @@ int usb_wwan_resume(struct usb_serial *serial) /* skip closed ports */ spin_lock_irq(&intfdata->susp_lock); - if (!portdata->opened) { + if (!portdata || !portdata->opened) { spin_unlock_irq(&intfdata->susp_lock); continue; } diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 9591e2b509d7..17830c9c7cc6 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) return 
group; } +/* called with vfio.group_lock held */ static void vfio_group_release(struct kref *kref) { struct vfio_group *group = container_of(kref, struct vfio_group, kref); @@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref) static void vfio_group_put(struct vfio_group *group) { - mutex_lock(&vfio.group_lock); - /* - * Release needs to unlock to unregister the notifier, so only - * unlock if not released. - */ - if (!kref_put(&group->kref, vfio_group_release)) - mutex_unlock(&vfio.group_lock); + kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); } /* Assume group_lock or group reference is held */ @@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref) struct vfio_device, kref); struct vfio_group *group = device->group; - mutex_lock(&group->device_lock); list_del(&device->group_next); mutex_unlock(&group->device_lock); @@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref) /* Device reference always implies a group reference */ static void vfio_device_put(struct vfio_device *device) { - kref_put(&device->kref, vfio_device_release); - vfio_group_put(device->group); + struct vfio_group *group = device->group; + kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); + vfio_group_put(group); } static void vfio_device_get(struct vfio_device *device) @@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) */ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - fd_install(ret, filep); - vfio_device_get(device); atomic_inc(&group->container_users); + + fd_install(ret, filep); break; } mutex_unlock(&group->device_lock); diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index e4e2fd1b5107..202bba6c997c 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -9,3 +9,6 @@ config VHOST_NET To compile this driver as a module, choose M here: the module will be called vhost_net. +if STAGING +source "drivers/vhost/Kconfig.tcm" +endif diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm new file mode 100644 index 000000000000..a9c6f76e3208 --- /dev/null +++ b/drivers/vhost/Kconfig.tcm @@ -0,0 +1,6 @@ +config TCM_VHOST + tristate "TCM_VHOST fabric module (EXPERIMENTAL)" + depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m + default n + ---help--- + Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile index 72dd02050bb9..a27b053bc9ab 100644 --- a/drivers/vhost/Makefile +++ b/drivers/vhost/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_VHOST_NET) += vhost_net.o vhost_net-y := vhost.o net.o + +obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c new file mode 100644 index 000000000000..fb366540ed54 --- /dev/null +++ b/drivers/vhost/tcm_vhost.c @@ -0,0 +1,1628 @@ +/******************************************************************************* + * Vhost kernel TCM fabric driver for virtio SCSI initiators + * + * (C) Copyright 2010-2012 RisingTide Systems LLC. + * (C) Copyright 2010-2012 IBM Corp. + * + * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * + * Authors: Nicholas A. 
Bellinger <nab@risingtidesystems.com> + * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + ****************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <generated/utsrelease.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/compat.h> +#include <linux/eventfd.h> +#include <linux/vhost.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <asm/unaligned.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/target_core_fabric_configfs.h> +#include <target/target_core_configfs.h> +#include <target/configfs_macros.h> +#include <linux/vhost.h> +#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */ +#include <linux/virtio_scsi.h> + +#include "vhost.c" +#include "vhost.h" +#include "tcm_vhost.h" + +struct vhost_scsi { + atomic_t vhost_ref_cnt; + struct tcm_vhost_tpg *vs_tpg; + struct vhost_dev dev; + struct vhost_virtqueue vqs[3]; + + struct vhost_work vs_completion_work; /* cmd completion work item */ + struct list_head vs_completion_list; /* cmd completion queue */ + spinlock_t vs_completion_lock; /* protects s_completion_list */ +}; + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *tcm_vhost_fabric_configfs; + +static struct workqueue_struct *tcm_vhost_workqueue; + +/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ +static DEFINE_MUTEX(tcm_vhost_mutex); +static LIST_HEAD(tcm_vhost_list); + +static int tcm_vhost_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int tcm_vhost_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *tcm_vhost_get_fabric_name(void) +{ + return "vhost"; +} + +static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_FCP: + return fc_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_fabric_proto_ident(se_tpg); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_fabric_proto_ident(se_tpg); +} + +static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct 
tcm_vhost_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 tcm_vhost_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); +} + +static u32 tcm_vhost_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); +} + +static char *tcm_vhost_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_FCP: + return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); +} + +static struct se_node_acl *tcm_vhost_alloc_fabric_acl( + struct se_portal_group *se_tpg) +{ + struct tcm_vhost_nacl *nacl; + + nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); + if (!nacl) { + pr_err("Unable to alocate struct tcm_vhost_nacl\n"); + return NULL; + } + + return &nacl->se_node_acl; +} + +static void tcm_vhost_release_fabric_acl( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_nacl, + struct tcm_vhost_nacl, se_node_acl); + kfree(nacl); +} + +static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) +{ + return; +} + +static int tcm_vhost_shutdown_session(struct se_session *se_sess) +{ + return 0; +} + +static void 
tcm_vhost_close_session(struct se_session *se_sess) +{ + return; +} + +static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int tcm_vhost_write_pending(struct se_cmd *se_cmd) +{ + /* Go ahead and process the write immediately */ + target_execute_cmd(se_cmd); + return 0; +} + +static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) +{ + return; +} + +static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) +{ + return 0; +} + +static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *); + +static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) +{ + struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_status(struct se_cmd *se_cmd) +{ + struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) +{ + return 0; +} + +static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd, + u32 sense_length) +{ + return 0; +} + +static u16 tcm_vhost_get_fabric_sense_len(void) +{ + return 0; +} + +static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + + /* TODO locking against target/backend threads? */ + transport_generic_free_cmd(se_cmd, 1); + + if (tv_cmd->tvc_sgl_count) { + u32 i; + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + + kfree(tv_cmd->tvc_sgl); + } + + kfree(tv_cmd); +} + +/* Dequeue a command from the completion list */ +static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion( + struct vhost_scsi *vs) +{ + struct tcm_vhost_cmd *tv_cmd = NULL; + + spin_lock_bh(&vs->vs_completion_lock); + if (list_empty(&vs->vs_completion_list)) { + spin_unlock_bh(&vs->vs_completion_lock); + return NULL; + } + + list_for_each_entry(tv_cmd, &vs->vs_completion_list, + tvc_completion_list) { + list_del(&tv_cmd->tvc_completion_list); + break; + } + spin_unlock_bh(&vs->vs_completion_lock); + return tv_cmd; +} + +/* Fill in status and signal that we are done processing this command + * + * This is scheduled in the vhost work queue so we are called with the owner + * process mm and can access the vring. + */ +static void vhost_scsi_complete_cmd_work(struct vhost_work *work) +{ + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, + vs_completion_work); + struct tcm_vhost_cmd *tv_cmd; + + while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) { + struct virtio_scsi_cmd_resp v_rsp; + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + int ret; + + pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, + tv_cmd, se_cmd->residual_count, se_cmd->scsi_status); + + memset(&v_rsp, 0, sizeof(v_rsp)); + v_rsp.resid = se_cmd->residual_count; + /* TODO is status_qualifier field needed? 
*/ + v_rsp.status = se_cmd->scsi_status; + v_rsp.sense_len = se_cmd->scsi_sense_length; + memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf, + v_rsp.sense_len); + ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); + if (likely(ret == 0)) + vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0); + else + pr_err("Faulted on virtio_scsi_cmd_resp\n"); + + vhost_scsi_free_cmd(tv_cmd); + } + + vhost_signal(&vs->dev, &vs->vqs[2]); +} + +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct vhost_scsi *vs = tv_cmd->tvc_vhost; + + pr_debug("%s tv_cmd %p\n", __func__, tv_cmd); + + spin_lock_bh(&vs->vs_completion_lock); + list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); + spin_unlock_bh(&vs->vs_completion_lock); + + vhost_work_queue(&vs->dev, &vs->vs_completion_work); +} + +static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( + struct tcm_vhost_tpg *tv_tpg, + struct virtio_scsi_cmd_req *v_req, + u32 exp_data_len, + int data_direction) +{ + struct tcm_vhost_cmd *tv_cmd; + struct tcm_vhost_nexus *tv_nexus; + struct se_portal_group *se_tpg = &tv_tpg->se_tpg; + struct se_session *se_sess; + struct se_cmd *se_cmd; + int sam_task_attr; + + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + pr_err("Unable to locate active struct tcm_vhost_nexus\n"); + return ERR_PTR(-EIO); + } + se_sess = tv_nexus->tvn_se_sess; + + tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); + if (!tv_cmd) { + pr_err("Unable to allocate struct tcm_vhost_cmd\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&tv_cmd->tvc_completion_list); + tv_cmd->tvc_tag = v_req->tag; + + se_cmd = &tv_cmd->tvc_se_cmd; + /* + * Locate the SAM Task Attr from virtio_scsi_cmd_req + */ + sam_task_attr = v_req->task_attr; + /* + * Initialize struct se_cmd descriptor from TCM infrastructure + */ + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len, + data_direction, sam_task_attr, + &tv_cmd->tvc_sense_buf[0]); + +#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */ + if (bidi) + se_cmd->se_cmd_flags |= SCF_BIDI; +#endif + return tv_cmd; +} + +/* + * Map a user memory range into a scatterlist + * + * Returns the number of scatterlist entries used or -errno on error. 
+ */ +static int vhost_scsi_map_to_sgl(struct scatterlist *sgl, + unsigned int sgl_count, void __user *ptr, size_t len, int write) +{ + struct scatterlist *sg = sgl; + unsigned int npages = 0; + int ret; + + while (len > 0) { + struct page *page; + unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK; + unsigned int nbytes = min_t(unsigned int, + PAGE_SIZE - offset, len); + + if (npages == sgl_count) { + ret = -ENOBUFS; + goto err; + } + + ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page); + BUG_ON(ret == 0); /* we should either get our page or fail */ + if (ret < 0) + goto err; + + sg_set_page(sg, page, nbytes, offset); + ptr += nbytes; + len -= nbytes; + sg++; + npages++; + } + return npages; + +err: + /* Put pages that we hold */ + for (sg = sgl; sg != &sgl[npages]; sg++) + put_page(sg_page(sg)); + return ret; +} + +static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, + struct iovec *iov, unsigned int niov, int write) +{ + int ret; + unsigned int i; + u32 sgl_count; + struct scatterlist *sg; + + /* + * Find out how long sglist needs to be + */ + sgl_count = 0; + for (i = 0; i < niov; i++) { + sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len + + PAGE_SIZE - 1) >> PAGE_SHIFT) - + ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT); + } + /* TODO overflow checking */ + + sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); + if (!sg) + return -ENOMEM; + pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__, + sg, sgl_count, IS_ERR(sg)); + sg_init_table(sg, sgl_count); + + tv_cmd->tvc_sgl = sg; + tv_cmd->tvc_sgl_count = sgl_count; + + pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); + for (i = 0; i < niov; i++) { + ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base, + iov[i].iov_len, write); + if (ret < 0) { + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + kfree(tv_cmd->tvc_sgl); + tv_cmd->tvc_sgl = NULL; + tv_cmd->tvc_sgl_count = 0; + return ret; + } + + sg += ret; + sgl_count -= ret; + } + return 0; +} + +static void tcm_vhost_submission_work(struct work_struct *work) +{ + struct tcm_vhost_cmd *tv_cmd = + container_of(work, struct tcm_vhost_cmd, work); + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; + int rc, sg_no_bidi = 0; + /* + * Locate the struct se_lun pointer based on v_req->lun, and + * attach it to struct se_cmd + */ + rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun); + if (rc < 0) { + pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun); + transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd, + tv_cmd->tvc_se_cmd.scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb); + if (rc == -ENOMEM) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } else if (rc < 0) { + if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) + tcm_vhost_queue_status(se_cmd); + else + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + if (tv_cmd->tvc_sgl_count) { + sg_ptr = tv_cmd->tvc_sgl; + /* + * For BIDI commands, pass in the extra READ buffer + * to transport_generic_map_mem_to_cmd() below.. 
+ */ +/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ +#if 0 + if (se_cmd->se_cmd_flags & SCF_BIDI) { + sg_bidi_ptr = NULL; + sg_no_bidi = 0; + } +#endif + } else { + sg_ptr = NULL; + } + + rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr, + tv_cmd->tvc_sgl_count, sg_bidi_ptr, + sg_no_bidi); + if (rc < 0) { + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + transport_handle_cdb_direct(se_cmd); +} + +static void vhost_scsi_handle_vq(struct vhost_scsi *vs) +{ + struct vhost_virtqueue *vq = &vs->vqs[2]; + struct virtio_scsi_cmd_req v_req; + struct tcm_vhost_tpg *tv_tpg; + struct tcm_vhost_cmd *tv_cmd; + u32 exp_data_len, data_first, data_num, data_direction; + unsigned out, in, i; + int head, ret; + + /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ + tv_tpg = vs->vs_tpg; + if (unlikely(!tv_tpg)) { + pr_err("%s endpoint not set\n", __func__); + return; + } + + mutex_lock(&vq->mutex); + vhost_disable_notify(&vs->dev, vq); + + for (;;) { + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov, + ARRAY_SIZE(vq->iov), &out, &in, + NULL, NULL); + pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", + head, out, in); + /* On error, stop handling until the next kick. */ + if (unlikely(head < 0)) + break; + /* Nothing new? Wait for eventfd to tell us they refilled. */ + if (head == vq->num) { + if (unlikely(vhost_enable_notify(&vs->dev, vq))) { + vhost_disable_notify(&vs->dev, vq); + continue; + } + break; + } + +/* FIXME: BIDI operation */ + if (out == 1 && in == 1) { + data_direction = DMA_NONE; + data_first = 0; + data_num = 0; + } else if (out == 1 && in > 1) { + data_direction = DMA_FROM_DEVICE; + data_first = out + 1; + data_num = in - 1; + } else if (out > 1 && in == 1) { + data_direction = DMA_TO_DEVICE; + data_first = 1; + data_num = out - 1; + } else { + vq_err(vq, "Invalid buffer layout out: %u in: %u\n", + out, in); + break; + } + + /* + * Check for a sane resp buffer so we can report errors to + * the guest. 
+ */ + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes\n", vq->iov[out].iov_len); + break; + } + + if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { + vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" + " bytes\n", vq->iov[0].iov_len); + break; + } + pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," + " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req)); + ret = __copy_from_user(&v_req, vq->iov[0].iov_base, + sizeof(v_req)); + if (unlikely(ret)) { + vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); + break; + } + + exp_data_len = 0; + for (i = 0; i < data_num; i++) + exp_data_len += vq->iov[data_first + i].iov_len; + + tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req, + exp_data_len, data_direction); + if (IS_ERR(tv_cmd)) { + vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", + PTR_ERR(tv_cmd)); + break; + } + pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" + ": %d\n", tv_cmd, exp_data_len, data_direction); + + tv_cmd->tvc_vhost = vs; + + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes, out: %d, in: %d\n", + vq->iov[out].iov_len, out, in); + break; + } + + tv_cmd->tvc_resp = vq->iov[out].iov_base; + + /* + * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb + * that will be used by tcm_vhost_new_cmd_map() and down into + * target_setup_cmd_from_cdb() + */ + memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); + /* + * Check that the recieved CDB size does not exceeded our + * hardcoded max for tcm_vhost + */ + /* TODO what if cdb was too small for varlen cdb header? */ + if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) > + TCM_VHOST_MAX_CDB_SIZE)) { + vq_err(vq, "Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(tv_cmd->tvc_cdb), + TCM_VHOST_MAX_CDB_SIZE); + break; /* TODO */ + } + tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; + + pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", + tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun); + + if (data_direction != DMA_NONE) { + ret = vhost_scsi_map_iov_to_sgl(tv_cmd, + &vq->iov[data_first], data_num, + data_direction == DMA_TO_DEVICE); + if (unlikely(ret)) { + vq_err(vq, "Failed to map iov to sgl\n"); + break; /* TODO */ + } + } + + /* + * Save the descriptor from vhost_get_vq_desc() to be used to + * complete the virtio-scsi request in TCM callback context via + * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() + */ + tv_cmd->tvc_vq_desc = head; + /* + * Dispatch tv_cmd descriptor for cmwq execution in process + * context provided by tcm_vhost_workqueue. This also ensures + * tv_cmd is executed on the same kworker CPU as this vhost + * thread to gain positive L2 cache locality effects.. 
+ */ + INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work); + queue_work(tcm_vhost_workqueue, &tv_cmd->work); + } + + mutex_unlock(&vq->mutex); +} + +static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) +{ + pr_err("%s: The handling func for control queue.\n", __func__); +} + +static void vhost_scsi_evt_handle_kick(struct vhost_work *work) +{ + pr_err("%s: The handling func for event queue.\n", __func__); +} + +static void vhost_scsi_handle_kick(struct vhost_work *work) +{ + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); + + vhost_scsi_handle_vq(vs); +} + +/* + * Called from vhost_scsi_ioctl() context to walk the list of available + * tcm_vhost_tpg with an active struct tcm_vhost_nexus + */ +static int vhost_scsi_set_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. */ + for (index = 0; index < vs->dev.nvqs; ++index) { + /* Verify that ring has been setup correctly. */ + if (!vhost_vq_access_ok(&vs->vqs[index])) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + } + + if (vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return -EEXIST; + } + mutex_unlock(&vs->dev.mutex); + + mutex_lock(&tcm_vhost_mutex); + list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (!tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + tv_tport = tv_tpg->tport; + + if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && + (tv_tpg->tport_tpgt == t->vhost_tpgt)) { + atomic_inc(&tv_tpg->tv_tpg_vhost_count); + smp_mb__after_atomic_inc(); + mutex_unlock(&tv_tpg->tv_tpg_mutex); + mutex_unlock(&tcm_vhost_mutex); + + mutex_lock(&vs->dev.mutex); + vs->vs_tpg = tv_tpg; + atomic_inc(&vs->vhost_ref_cnt); + smp_mb__after_atomic_inc(); + mutex_unlock(&vs->dev.mutex); + return 0; + } + mutex_unlock(&tv_tpg->tv_tpg_mutex); + } + mutex_unlock(&tcm_vhost_mutex); + return -EINVAL; +} + +static int vhost_scsi_clear_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. 
*/ + for (index = 0; index < vs->dev.nvqs; ++index) { + if (!vhost_vq_access_ok(&vs->vqs[index])) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + } + + if (!vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return -ENODEV; + } + tv_tpg = vs->vs_tpg; + tv_tport = tv_tpg->tport; + + if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || + (tv_tpg->tport_tpgt != t->vhost_tpgt)) { + mutex_unlock(&vs->dev.mutex); + pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" + " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", + tv_tport->tport_name, tv_tpg->tport_tpgt, + t->vhost_wwpn, t->vhost_tpgt); + return -EINVAL; + } + atomic_dec(&tv_tpg->tv_tpg_vhost_count); + vs->vs_tpg = NULL; + mutex_unlock(&vs->dev.mutex); + + return 0; +} + +static int vhost_scsi_open(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s; + int r; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); + INIT_LIST_HEAD(&s->vs_completion_list); + spin_lock_init(&s->vs_completion_lock); + + s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick; + s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick; + s->vqs[2].handle_kick = vhost_scsi_handle_kick; + r = vhost_dev_init(&s->dev, s->vqs, 3); + if (r < 0) { + kfree(s); + return r; + } + + f->private_data = s; + return 0; +} + +static int vhost_scsi_release(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s = f->private_data; + + if (s->vs_tpg && s->vs_tpg->tport) { + struct vhost_scsi_target backend; + + memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name, + sizeof(backend.vhost_wwpn)); + backend.vhost_tpgt = s->vs_tpg->tport_tpgt; + vhost_scsi_clear_endpoint(s, &backend); + } + + vhost_dev_cleanup(&s->dev, false); + kfree(s); + return 0; +} + +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ + if (features & ~VHOST_FEATURES) + return -EOPNOTSUPP; + + mutex_lock(&vs->dev.mutex); + if ((features & (1 << VHOST_F_LOG_ALL)) && + !vhost_log_access_ok(&vs->dev)) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + vs->dev.acked_features = features; + /* TODO possibly smp_wmb() and flush vqs */ + mutex_unlock(&vs->dev.mutex); + return 0; +} + +static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct vhost_scsi *vs = f->private_data; + struct vhost_scsi_target backend; + void __user *argp = (void __user *)arg; + u64 __user *featurep = argp; + u64 features; + int r; + + switch (ioctl) { + case VHOST_SCSI_SET_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + + return vhost_scsi_set_endpoint(vs, &backend); + case VHOST_SCSI_CLEAR_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + + return vhost_scsi_clear_endpoint(vs, &backend); + case VHOST_SCSI_GET_ABI_VERSION: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + + backend.abi_version = VHOST_SCSI_ABI_VERSION; + + if (copy_to_user(argp, &backend, sizeof backend)) + return -EFAULT; + return 0; + case VHOST_GET_FEATURES: + features = VHOST_FEATURES; + if (copy_to_user(featurep, &features, sizeof features)) + return -EFAULT; + return 0; + case VHOST_SET_FEATURES: + if (copy_from_user(&features, featurep, sizeof features)) + return -EFAULT; + return vhost_scsi_set_features(vs, features); + default: + mutex_lock(&vs->dev.mutex); + r = vhost_dev_ioctl(&vs->dev, ioctl, arg); + mutex_unlock(&vs->dev.mutex); + return r; + } +} + +static const struct 
file_operations vhost_scsi_fops = { + .owner = THIS_MODULE, + .release = vhost_scsi_release, + .unlocked_ioctl = vhost_scsi_ioctl, + /* TODO compat ioctl? */ + .open = vhost_scsi_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vhost_scsi_misc = { + MISC_DYNAMIC_MINOR, + "vhost-scsi", + &vhost_scsi_fops, +}; + +static int __init vhost_scsi_register(void) +{ + return misc_register(&vhost_scsi_misc); +} + +static int vhost_scsi_deregister(void) +{ + return misc_deregister(&vhost_scsi_misc); +} + +static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) +{ + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static int tcm_vhost_port_link( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + atomic_inc(&tv_tpg->tv_tpg_port_count); + smp_mb__after_atomic_inc(); + + return 0; +} + +static void tcm_vhost_port_unlink( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + atomic_dec(&tv_tpg->tv_tpg_port_count); + smp_mb__after_atomic_dec(); +} + +static struct se_node_acl *tcm_vhost_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl, *se_nacl_new; + struct tcm_vhost_nacl *nacl; + u64 wwpn = 0; + u32 nexus_depth; + + /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); + + nexus_depth = 1; + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explict + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, nexus_depth); + if (IS_ERR(se_nacl)) { + tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + /* + * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN + */ + nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); + nacl->iport_wwpn = wwpn; + + return se_nacl; +} + +static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_acl, + struct tcm_vhost_nacl, se_node_acl); + core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); + kfree(nacl); +} + +static int tcm_vhost_make_nexus( + struct tcm_vhost_tpg *tv_tpg, + const char *name) +{ + struct se_portal_group *se_tpg; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("tv_tpg->tpg_nexus already exists\n"); + return -EEXIST; + } + se_tpg = &tv_tpg->se_tpg; + + tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_err("Unable to allocate struct tcm_vhost_nexus\n"); + return -ENOMEM; + } + /* + * Initialize the struct se_session pointer + */ + tv_nexus->tvn_se_sess = transport_init_session(); + if (IS_ERR(tv_nexus->tvn_se_sess)) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Since we are running in 'demo mode' this call with generate a + * struct se_node_acl for the tcm_vhost struct se_portal_group with + * the SCSI Initiator port name of the passed configfs group 'name'. 
+ */ + tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( + se_tpg, (unsigned char *)name); + if (!tv_nexus->tvn_se_sess->se_node_acl) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("core_tpg_check_initiator_node_acl() failed" + " for %s\n", name); + transport_free_session(tv_nexus->tvn_se_sess); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Now register the TCM vHost virtual I_T Nexus as active with the + * call to __transport_register_session() + */ + __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + tv_nexus->tvn_se_sess, tv_nexus); + tv_tpg->tpg_nexus = tv_nexus; + + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return 0; +} + +static int tcm_vhost_drop_nexus( + struct tcm_vhost_tpg *tpg) +{ + struct se_session *se_sess; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tpg->tv_tpg_mutex); + tv_nexus = tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + se_sess = tv_nexus->tvn_se_sess; + if (!se_sess) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + if (atomic_read(&tpg->tv_tpg_port_count)) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vHost I_T Nexus with" + " active TPG port count: %d\n", + atomic_read(&tpg->tv_tpg_port_count)); + return -EPERM; + } + + if (atomic_read(&tpg->tv_tpg_vhost_count)) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vHost I_T Nexus with" + " active TPG vhost count: %d\n", + atomic_read(&tpg->tv_tpg_vhost_count)); + return -EPERM; + } + + pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated" + " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + /* + * Release the SCSI I_T Nexus to the emulated vHost Target Port + */ + transport_deregister_session(tv_nexus->tvn_se_sess); + tpg->tpg_nexus = NULL; + mutex_unlock(&tpg->tv_tpg_mutex); + + kfree(tv_nexus); + return 0; +} + +static ssize_t tcm_vhost_tpg_show_nexus( + struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_nexus *tv_nexus; + ssize_t ret; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return -ENODEV; + } + ret = snprintf(page, PAGE_SIZE, "%s\n", + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + mutex_unlock(&tv_tpg->tv_tpg_mutex); + + return ret; +} + +static ssize_t tcm_vhost_tpg_store_nexus( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport_wwn = tv_tpg->tport; + unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; + int ret; + /* + * Shutdown the active I_T nexus if 'NULL' is passed.. + */ + if (!strncmp(page, "NULL", 4)) { + ret = tcm_vhost_drop_nexus(tv_tpg); + return (!ret) ? count : ret; + } + /* + * Otherwise make sure the passed virtual Initiator port WWN matches + * the fabric protocol_id set in tcm_vhost_make_tport(), and call + * tcm_vhost_make_nexus(). 
+ */ + if (strlen(page) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated NAA Sas Address: %s, exceeds" + " max: %d\n", page, TCM_VHOST_NAMELEN); + return -EINVAL; + } + snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); + + ptr = strstr(i_port, "naa."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { + pr_err("Passed SAS Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + ptr = strstr(i_port, "fc."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { + pr_err("Passed FCP Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[3]; /* Skip over "fc." */ + goto check_newline; + } + ptr = strstr(i_port, "iqn."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { + pr_err("Passed iSCSI Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + pr_err("Unable to locate prefix for emulated Initiator Port:" + " %s\n", i_port); + return -EINVAL; + /* + * Clear any trailing newline for the NAA WWN + */ +check_newline: + if (i_port[strlen(i_port)-1] == '\n') + i_port[strlen(i_port)-1] = '\0'; + + ret = tcm_vhost_make_nexus(tv_tpg, port_ptr); + if (ret < 0) + return ret; + + return count; +} + +TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { + &tcm_vhost_tpg_nexus.attr, + NULL, +}; + +static struct se_portal_group *tcm_vhost_make_tpg( + struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + struct tcm_vhost_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_vhost_tpg"); + return ERR_PTR(-ENOMEM); + } + mutex_init(&tpg->tv_tpg_mutex); + INIT_LIST_HEAD(&tpg->tv_tpg_list); + tpg->tport = tport; + tpg->tport_tpgt = tpgt; + + ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + mutex_lock(&tcm_vhost_mutex); + list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); + mutex_unlock(&tcm_vhost_mutex); + + return &tpg->se_tpg; +} + +static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tcm_vhost_mutex); + list_del(&tpg->tv_tpg_list); + mutex_unlock(&tcm_vhost_mutex); + /* + * Release the virtual I_T Nexus for this vHost TPG + */ + tcm_vhost_drop_nexus(tpg); + /* + * Deregister the se_tpg from TCM.. 
+ */ + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +static struct se_wwn *tcm_vhost_make_tport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport; + char *ptr; + u64 wwpn = 0; + int off = 0; + + /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + + tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); + if (!tport) { + pr_err("Unable to allocate struct tcm_vhost_tport"); + return ERR_PTR(-ENOMEM); + } + tport->tport_wwpn = wwpn; + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." */ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; + goto check_len; + } + + pr_err("Unable to locate prefix for emulated Target Port:" + " %s\n", name); + kfree(tport); + return ERR_PTR(-EINVAL); + +check_len: + if (strlen(name) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated %s Address: %s, exceeds" + " max: %d\n", name, tcm_vhost_dump_proto_id(tport), + TCM_VHOST_NAMELEN); + kfree(tport); + return ERR_PTR(-EINVAL); + } + snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); + + pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); + + return &tport->tport_wwn; +} + +static void tcm_vhost_drop_tport(struct se_wwn *wwn) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), + tport->tport_name); + + kfree(tport); +} + +static ssize_t tcm_vhost_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" + "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); +} + +TF_WWN_ATTR_RO(tcm_vhost, version); + +static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { + &tcm_vhost_wwn_version.attr, + NULL, +}; + +static struct target_core_fabric_ops tcm_vhost_ops = { + .get_fabric_name = tcm_vhost_get_fabric_name, + .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, + .tpg_get_wwn = tcm_vhost_get_fabric_wwn, + .tpg_get_tag = tcm_vhost_get_tag, + .tpg_get_default_depth = tcm_vhost_get_default_depth, + .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, + .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, + .tpg_check_demo_mode = tcm_vhost_check_true, + .tpg_check_demo_mode_cache = tcm_vhost_check_true, + .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, + .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, + .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, + .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, + .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, + .release_cmd = tcm_vhost_release_cmd, + .shutdown_session = tcm_vhost_shutdown_session, + .close_session = tcm_vhost_close_session, + .sess_get_index = tcm_vhost_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_vhost_write_pending, + .write_pending_status = tcm_vhost_write_pending_status, + 
.set_default_node_attributes = tcm_vhost_set_default_node_attrs, + .get_task_tag = tcm_vhost_get_task_tag, + .get_cmd_state = tcm_vhost_get_cmd_state, + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, + .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len, + .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_vhost_make_tport, + .fabric_drop_wwn = tcm_vhost_drop_tport, + .fabric_make_tpg = tcm_vhost_make_tpg, + .fabric_drop_tpg = tcm_vhost_drop_tpg, + .fabric_post_link = tcm_vhost_port_link, + .fabric_pre_unlink = tcm_vhost_port_unlink, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = tcm_vhost_make_nodeacl, + .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, +}; + +static int tcm_vhost_register_configfs(void) +{ + struct target_fabric_configfs *fabric; + int ret; + + pr_debug("TCM_VHOST fabric module %s on %s/%s" + " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); + /* + * Register the top level struct config_item_type with TCM core + */ + fabric = target_fabric_configfs_init(THIS_MODULE, "vhost"); + if (IS_ERR(fabric)) { + pr_err("target_fabric_configfs_init() failed\n"); + return PTR_ERR(fabric); + } + /* + * Setup fabric->tf_ops from our local tcm_vhost_ops + */ + fabric->tf_ops = tcm_vhost_ops; + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * Register the fabric for use within TCM + */ + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed" + " for TCM_VHOST\n"); + return ret; + } + /* + * Setup our local pointer to *fabric + */ + tcm_vhost_fabric_configfs = fabric; + pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); + return 0; +}; + +static void tcm_vhost_deregister_configfs(void) +{ + if (!tcm_vhost_fabric_configfs) + return; + + target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); + tcm_vhost_fabric_configfs = NULL; + pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); +}; + +static int __init tcm_vhost_init(void) +{ + int ret = -ENOMEM; + + tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); + if (!tcm_vhost_workqueue) + goto out; + + ret = vhost_scsi_register(); + if (ret < 0) + goto out_destroy_workqueue; + + ret = tcm_vhost_register_configfs(); + if (ret < 0) + goto out_vhost_scsi_deregister; + + return 0; + +out_vhost_scsi_deregister: + vhost_scsi_deregister(); +out_destroy_workqueue: + destroy_workqueue(tcm_vhost_workqueue); +out: + return ret; +}; + +static void tcm_vhost_exit(void) +{ + tcm_vhost_deregister_configfs(); + vhost_scsi_deregister(); + destroy_workqueue(tcm_vhost_workqueue); +}; + +MODULE_DESCRIPTION("TCM_VHOST series fabric driver"); +MODULE_LICENSE("GPL"); +module_init(tcm_vhost_init); +module_exit(tcm_vhost_exit); 
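For orientation on how the new fabric is driven from userspace: tcm_vhost registers the misc device "vhost-scsi" and accepts VHOST_SCSI_GET_ABI_VERSION, VHOST_SCSI_SET_ENDPOINT and VHOST_SCSI_CLEAR_ENDPOINT ioctls carrying a struct vhost_scsi_target (see the tcm_vhost.h hunk below). The following is a minimal, hypothetical userspace sketch, not part of the patch: it assumes the ioctl numbers and vhost_scsi_target layout from drivers/vhost/tcm_vhost.h are mirrored in a local header, that TRANSPORT_IQN_LEN is 224 as in the TCM core, and that the WWPN/TPGT endpoint has already been created via configfs; a real consumer such as QEMU would additionally issue VHOST_SET_OWNER and set up the virtqueues before I/O.

/* Hypothetical sketch: query the vhost-scsi ABI version and attach the
 * device to an existing TCM vhost endpoint. Assumptions: local copies of
 * the ioctl defines and struct layout from tcm_vhost.h, TRANSPORT_IQN_LEN
 * of 224, and an example WWPN; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

#define TRANSPORT_IQN_LEN 224   /* assumption: mirrors the kernel define */

struct vhost_scsi_target {
        int abi_version;
        unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
        unsigned short vhost_tpgt;
};

#define VHOST_SCSI_SET_ENDPOINT    _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT  _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)

int main(void)
{
        struct vhost_scsi_target t;
        int fd = open("/dev/vhost-scsi", O_RDWR);

        if (fd < 0) {
                perror("open /dev/vhost-scsi");
                return 1;
        }

        /* Returns VHOST_SCSI_ABI_VERSION (0 in this patch) in t.abi_version. */
        memset(&t, 0, sizeof(t));
        if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &t) < 0) {
                perror("VHOST_SCSI_GET_ABI_VERSION");
                return 1;
        }
        printf("vhost-scsi ABI version: %d\n", t.abi_version);

        /* The WWPN and TPGT must match a TPG created under configfs
         * (e.g. /sys/kernel/config/target/vhost/); the value below is
         * purely illustrative.
         */
        memset(&t, 0, sizeof(t));
        strncpy((char *)t.vhost_wwpn, "naa.600140554cf3a18e",
                sizeof(t.vhost_wwpn) - 1);
        t.vhost_tpgt = 1;
        if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t) < 0)
                perror("VHOST_SCSI_SET_ENDPOINT");

        close(fd);
        return 0;
}

Under these assumptions, SET_ENDPOINT walks tcm_vhost_list for a TPG whose tport_name and tport_tpgt match vhost_wwpn and vhost_tpgt, exactly as vhost_scsi_set_endpoint() above does, and CLEAR_ENDPOINT with the same struct detaches it again.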
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h new file mode 100644 index 000000000000..c983ed21e413 --- /dev/null +++ b/drivers/vhost/tcm_vhost.h @@ -0,0 +1,101 @@ +#define TCM_VHOST_VERSION "v0.1" +#define TCM_VHOST_NAMELEN 256 +#define TCM_VHOST_MAX_CDB_SIZE 32 + +struct tcm_vhost_cmd { + /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ + int tvc_vq_desc; + /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ + u64 tvc_tag; + /* The number of scatterlists associated with this cmd */ + u32 tvc_sgl_count; + /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ + u32 tvc_lun; + /* Pointer to the SGL formatted memory from virtio-scsi */ + struct scatterlist *tvc_sgl; + /* Pointer to response */ + struct virtio_scsi_cmd_resp __user *tvc_resp; + /* Pointer to vhost_scsi for our device */ + struct vhost_scsi *tvc_vhost; + /* The TCM I/O descriptor that is accessed via container_of() */ + struct se_cmd tvc_se_cmd; + /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ + struct work_struct work; + /* Copy of the incoming SCSI command descriptor block (CDB) */ + unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; + /* Completed commands list, serviced from vhost worker thread */ + struct list_head tvc_completion_list; +}; + +struct tcm_vhost_nexus { + /* Pointer to TCM session for I_T Nexus */ + struct se_session *tvn_se_sess; +}; + +struct tcm_vhost_nacl { + /* Binary World Wide unique Port Name for Vhost Initiator port */ + u64 iport_wwpn; + /* ASCII formatted WWPN for Sas Initiator port */ + char iport_name[TCM_VHOST_NAMELEN]; + /* Returned by tcm_vhost_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct tcm_vhost_tpg { + /* Vhost port target portal group tag for TCM */ + u16 tport_tpgt; + /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ + atomic_t tv_tpg_port_count; + /* Used for vhost_scsi device reference to tpg_nexus */ + atomic_t tv_tpg_vhost_count; + /* list for tcm_vhost_list */ + struct list_head tv_tpg_list; + /* Used to protect access for tpg_nexus */ + struct mutex tv_tpg_mutex; + /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ + struct tcm_vhost_nexus *tpg_nexus; + /* Pointer back to tcm_vhost_tport */ + struct tcm_vhost_tport *tport; + /* Returned by tcm_vhost_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct tcm_vhost_tport { + /* SCSI protocol the tport is providing */ + u8 tport_proto_id; + /* Binary World Wide unique Port Name for Vhost Target port */ + u64 tport_wwpn; + /* ASCII formatted WWPN for Vhost Target port */ + char tport_name[TCM_VHOST_NAMELEN]; + /* Returned by tcm_vhost_make_tport() */ + struct se_wwn tport_wwn; +}; + +/* + * As per request from MST, keep TCM_VHOST related ioctl defines out of + * linux/vhost.h (user-space) for now.. + */ + +#include <linux/vhost.h> + +/* + * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. + * + * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + + * RFC-v2 vhost-scsi userspace. 
Add GET_ABI_VERSION ioctl usage + */ + +#define VHOST_SCSI_ABI_VERSION 0 + +struct vhost_scsi_target { + int abi_version; + unsigned char vhost_wwpn[TRANSPORT_IQN_LEN]; + unsigned short vhost_tpgt; +}; + +/* VHOST_SCSI specific defines */ +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target) diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index b0b2ac335347..747442d2c0f6 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -90,7 +90,8 @@ #undef DEBUG #ifdef DEBUG -#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); +#define DBG(fmt, args...) \ + printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); #else #define DBG(fmt, args...) #endif @@ -449,8 +450,9 @@ static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par); #if 0 static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, - void __iomem *bios); -static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); + void __iomem *bios); +static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, + const struct aty128fb_par *par); #endif static void aty128_timings(struct aty128fb_par *par); static void aty128_init_engine(struct aty128fb_par *par); @@ -779,7 +781,8 @@ static u32 depth_to_dst(u32 depth) #ifndef __sparc__ -static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) +static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, + struct pci_dev *dev) { u16 dptr; u8 rom_type; @@ -811,13 +814,14 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s /* Look for the PCI data to check the ROM type */ dptr = BIOS_IN16(0x18); - /* Check the PCI data signature. If it's wrong, we still assume a normal x86 ROM - * for now, until I've verified this works everywhere. The goal here is more - * to phase out Open Firmware images. + /* Check the PCI data signature. If it's wrong, we still assume a normal + * x86 ROM for now, until I've verified this works everywhere. + * The goal here is more to phase out Open Firmware images. * - * Currently, we only look at the first PCI data, we could iteratre and deal with - * them all, and we should use fb_bios_start relative to start of image and not - * relative start of ROM, but so far, I never found a dual-image ATI card + * Currently, we only look at the first PCI data, we could iteratre and + * deal with them all, and we should use fb_bios_start relative to start + * of image and not relative start of ROM, but so far, I never found a + * dual-image ATI card. 
* * typedef struct { * u32 signature; + 0x00 @@ -852,7 +856,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n"); goto failed; default: - printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type); + printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", + rom_type); goto failed; } anyway: @@ -863,7 +868,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s return NULL; } -static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios) +static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, + unsigned char __iomem *bios) { unsigned int bios_hdr; unsigned int bios_pll; @@ -1247,10 +1253,13 @@ static int aty128_crtc_to_var(const struct aty128_crtc *crtc, static void aty128_set_crt_enable(struct aty128fb_par *par, int on) { if (on) { - aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | CRT_CRTC_ON); - aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | DAC_PALETTE2_SNOOP_EN)); + aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | + CRT_CRTC_ON); + aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | + DAC_PALETTE2_SNOOP_EN)); } else - aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & ~CRT_CRTC_ON); + aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & + ~CRT_CRTC_ON); } static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) @@ -1281,7 +1290,8 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) } } -static void aty128_set_pll(struct aty128_pll *pll, const struct aty128fb_par *par) +static void aty128_set_pll(struct aty128_pll *pll, + const struct aty128fb_par *par) { u32 div3; @@ -1366,7 +1376,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll, } -static int aty128_pll_to_var(const struct aty128_pll *pll, struct fb_var_screeninfo *var) +static int aty128_pll_to_var(const struct aty128_pll *pll, + struct fb_var_screeninfo *var) { var->pixclock = 100000000 / pll->vclk; @@ -1512,7 +1523,8 @@ static int aty128fb_set_par(struct fb_info *info) * encode/decode the User Defined Part of the Display */ -static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par) +static int aty128_decode_var(struct fb_var_screeninfo *var, + struct aty128fb_par *par) { int err; struct aty128_crtc crtc; @@ -1559,7 +1571,8 @@ static int aty128_encode_var(struct fb_var_screeninfo *var, } -static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int aty128fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct aty128fb_par par; int err; @@ -1575,7 +1588,8 @@ static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf /* * Pan or Wrap the Display */ -static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb) +static int aty128fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *fb) { struct aty128fb_par *par = fb->par; u32 xoffset, yoffset; @@ -1594,7 +1608,8 @@ static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *f par->crtc.xoffset = xoffset; par->crtc.yoffset = yoffset; - offset = ((yoffset * par->crtc.vxres + xoffset)*(par->crtc.bpp >> 3)) & ~7; + offset = ((yoffset * par->crtc.vxres + xoffset) * (par->crtc.bpp >> 3)) + & ~7; if (par->crtc.bpp == 24) offset += 8 * (offset % 3); /* Must be multiple of 8 and 3 */ @@ -1620,11 +1635,13 @@ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue, * 
do mirroring */ - aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PALETTE_ACCESS_CNTL); + aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | + DAC_PALETTE_ACCESS_CNTL); aty_st_8(PALETTE_INDEX, regno); aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue); #endif - aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & ~DAC_PALETTE_ACCESS_CNTL); + aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & + ~DAC_PALETTE_ACCESS_CNTL); } aty_st_8(PALETTE_INDEX, regno); @@ -1753,7 +1770,8 @@ static int aty128_bl_update_status(struct backlight_device *bd) aty_st_le32(LVDS_GEN_CNTL, reg); } reg &= ~LVDS_BL_MOD_LEVEL_MASK; - reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT); + reg |= (aty128_bl_get_level_brightness(par, level) << + LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_ON | LVDS_EN; reg &= ~LVDS_DISPLAY_DIS; @@ -1764,7 +1782,8 @@ static int aty128_bl_update_status(struct backlight_device *bd) #endif } else { reg &= ~LVDS_BL_MOD_LEVEL_MASK; - reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT); + reg |= (aty128_bl_get_level_brightness(par, 0) << + LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); @@ -1869,7 +1888,8 @@ static void aty128_early_resume(void *data) } #endif /* CONFIG_PPC_PMAC */ -static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit aty128_init(struct pci_dev *pdev, + const struct pci_device_id *ent) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; @@ -1887,7 +1907,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* range check to make sure */ if (ent->driver_data < ARRAY_SIZE(r128_family)) - strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card)); + strlcat(video_card, r128_family[ent->driver_data], + sizeof(video_card)); printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); @@ -1911,11 +1932,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* Indicate sleep capability */ if (par->chip_gen == rage_M3) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1); -#if 0 /* Disable the early video resume hack for now as it's causing problems, among - * others we now rely on the PCI core restoring the config space for us, which - * isn't the case with that hack, and that code path causes various things to - * be called with interrupts off while they shouldn't. I'm leaving the code in - * as it can be useful for debugging purposes +#if 0 /* Disable the early video resume hack for now as it's causing problems, + * among others we now rely on the PCI core restoring the config space + * for us, which isn't the case with that hack, and that code path causes + * various things to be called with interrupts off while they shouldn't. 
+ * I'm leaving the code in as it can be useful for debugging purposes */ pmac_set_early_video_resume(aty128_early_resume, par); #endif @@ -1953,11 +1974,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i default_vmode = VMODE_1152_768_60; if (default_cmode > 16) - default_cmode = CMODE_32; + default_cmode = CMODE_32; else if (default_cmode > 8) - default_cmode = CMODE_16; + default_cmode = CMODE_16; else - default_cmode = CMODE_8; + default_cmode = CMODE_8; if (mac_vmode_to_var(default_vmode, default_cmode, &var)) var = default_var; @@ -2018,7 +2039,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i #ifdef CONFIG_PCI /* register a card ++ajoshi */ -static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit aty128_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { unsigned long fb_addr, reg_addr; struct aty128fb_par *par; @@ -2318,39 +2340,39 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, u_int width, u_int height, struct fb_info_aty128 *par) { - u32 save_dp_datatype, save_dp_cntl, dstval; - - if (!width || !height) - return; - - dstval = depth_to_dst(par->current_par.crtc.depth); - if (dstval == DST_24BPP) { - srcx *= 3; - dstx *= 3; - width *= 3; - } else if (dstval == -EINVAL) { - printk("aty128fb: invalid depth or RGBA\n"); - return; - } - - wait_for_fifo(2, par); - save_dp_datatype = aty_ld_le32(DP_DATATYPE); - save_dp_cntl = aty_ld_le32(DP_CNTL); - - wait_for_fifo(6, par); - aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); - aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); - aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); - aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); - - aty_st_le32(DST_Y_X, (dsty << 16) | dstx); - aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); - - par->blitter_may_be_busy = 1; - - wait_for_fifo(2, par); - aty_st_le32(DP_DATATYPE, save_dp_datatype); - aty_st_le32(DP_CNTL, save_dp_cntl); + u32 save_dp_datatype, save_dp_cntl, dstval; + + if (!width || !height) + return; + + dstval = depth_to_dst(par->current_par.crtc.depth); + if (dstval == DST_24BPP) { + srcx *= 3; + dstx *= 3; + width *= 3; + } else if (dstval == -EINVAL) { + printk("aty128fb: invalid depth or RGBA\n"); + return; + } + + wait_for_fifo(2, par); + save_dp_datatype = aty_ld_le32(DP_DATATYPE); + save_dp_cntl = aty_ld_le32(DP_CNTL); + + wait_for_fifo(6, par); + aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); + aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); + aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); + aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); + + aty_st_le32(DST_Y_X, (dsty << 16) | dstx); + aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); + + par->blitter_may_be_busy = 1; + + wait_for_fifo(2, par); + aty_st_le32(DP_DATATYPE, save_dp_datatype); + aty_st_le32(DP_CNTL, save_dp_cntl); } @@ -2358,17 +2380,17 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, * Text mode accelerated functions */ -static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx, - int height, int width) +static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, + int dx, int height, int width) { - sx *= fontwidth(p); - sy *= fontheight(p); - dx *= fontwidth(p); - dy *= fontheight(p); - width *= fontwidth(p); - height *= fontheight(p); - - aty128_rectcopy(sx, sy, dx, dy, width, height, + sx *= fontwidth(p); + sy 
*= fontheight(p); + dx *= fontwidth(p); + dy *= fontheight(p); + width *= fontwidth(p); + height *= fontheight(p); + + aty128_rectcopy(sx, sy, dx, dy, width, height, (struct fb_info_aty128 *)p->fb_info); } #endif /* 0 */ diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 2e471c22abf5..f8a79fca4a22 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -372,8 +372,12 @@ static void fb_flashcursor(struct work_struct *work) struct vc_data *vc = NULL; int c; int mode; + int ret; + + ret = console_trylock(); + if (ret == 0) + return; - console_lock(); if (ops && ops->currcon != -1) vc = vc_cons[ops->currcon].d; diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 47118c75a4c0..7ae9d53f2bf1 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -30,7 +30,10 @@ #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/console.h> +#include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/delay.h> +#include <linux/lcm.h> #include <video/da8xx-fb.h> #include <asm/div64.h> @@ -160,6 +163,13 @@ struct da8xx_fb_par { wait_queue_head_t vsync_wait; int vsync_flag; int vsync_timeout; + spinlock_t lock_for_chan_update; + + /* + * LCDC has 2 ping pong DMA channels, channel 0 + * and channel 1. + */ + unsigned int which_dma_channel_done; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; unsigned int lcd_fck_rate; @@ -260,10 +270,18 @@ static inline void lcd_enable_raster(void) { u32 reg; + /* Put LCDC in reset for several cycles */ + if (lcd_revision == LCD_VERSION_2) + /* Write 1 to reset LCDC */ + lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); + mdelay(1); + /* Bring LCDC out of reset */ if (lcd_revision == LCD_VERSION_2) lcdc_write(0, LCD_CLK_RESET_REG); + mdelay(1); + /* Above reset sequence doesnot reset register context */ reg = lcdc_read(LCD_RASTER_CTRL_REG); if (!(reg & LCD_RASTER_ENABLE)) lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); @@ -277,10 +295,6 @@ static inline void lcd_disable_raster(void) reg = lcdc_read(LCD_RASTER_CTRL_REG); if (reg & LCD_RASTER_ENABLE) lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); - - if (lcd_revision == LCD_VERSION_2) - /* Write 1 to reset LCDC */ - lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); } static void lcd_blit(int load_mode, struct da8xx_fb_par *par) @@ -344,8 +358,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par) lcd_enable_raster(); } -/* Configure the Burst Size of DMA */ -static int lcd_cfg_dma(int burst_size) +/* Configure the Burst Size and fifo threhold of DMA */ +static int lcd_cfg_dma(int burst_size, int fifo_th) { u32 reg; @@ -369,6 +383,9 @@ static int lcd_cfg_dma(int burst_size) default: return -EINVAL; } + + reg |= (fifo_th << 8); + lcdc_write(reg, LCD_DMA_CTRL_REG); return 0; @@ -670,8 +687,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) & ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG); - /* Configure the DMA burst size. */ - ret = lcd_cfg_dma(cfg->dma_burst_sz); + /* Configure the DMA burst size and fifo threshold. 
*/ + ret = lcd_cfg_dma(cfg->dma_burst_sz, cfg->fifo_th); if (ret < 0) return ret; @@ -715,7 +732,6 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) { struct da8xx_fb_par *par = arg; u32 stat = lcdc_read(LCD_MASKED_STAT_REG); - u32 reg_int; if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { lcd_disable_raster(); @@ -732,10 +748,8 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) lcdc_write(stat, LCD_MASKED_STAT_REG); - /* Disable PL completion inerrupt */ - reg_int = lcdc_read(LCD_INT_ENABLE_CLR_REG) | - (LCD_V2_PL_INT_ENA); - lcdc_write(reg_int, LCD_INT_ENABLE_CLR_REG); + /* Disable PL completion interrupt */ + lcdc_write(LCD_V2_PL_INT_ENA, LCD_INT_ENABLE_CLR_REG); /* Setup and start data loading mode */ lcd_blit(LOAD_DATA, par); @@ -743,6 +757,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) lcdc_write(stat, LCD_MASKED_STAT_REG); if (stat & LCD_END_OF_FRAME0) { + par->which_dma_channel_done = 0; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, @@ -752,6 +767,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) } if (stat & LCD_END_OF_FRAME1) { + par->which_dma_channel_done = 1; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, @@ -798,6 +814,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg) lcdc_write(stat, LCD_STAT_REG); if (stat & LCD_END_OF_FRAME0) { + par->which_dma_channel_done = 0; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, @@ -807,6 +824,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg) } if (stat & LCD_END_OF_FRAME1) { + par->which_dma_channel_done = 1; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, @@ -1021,11 +1039,14 @@ static int cfb_blank(int blank, struct fb_info *info) par->blank = blank; switch (blank) { case FB_BLANK_UNBLANK: + lcd_enable_raster(); + if (par->panel_power_ctrl) par->panel_power_ctrl(1); - - lcd_enable_raster(); break; + case FB_BLANK_NORMAL: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: if (par->panel_power_ctrl) par->panel_power_ctrl(0); @@ -1052,6 +1073,7 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix = &fbi->fix; unsigned int end; unsigned int start; + unsigned long irq_flags; if (var->xoffset != fbi->var.xoffset || var->yoffset != fbi->var.yoffset) { @@ -1069,6 +1091,21 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var, end = start + fbi->var.yres * fix->line_length - 1; par->dma_start = start; par->dma_end = end; + spin_lock_irqsave(&par->lock_for_chan_update, + irq_flags); + if (par->which_dma_channel_done == 0) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + } else if (par->which_dma_channel_done == 1) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); + } + spin_unlock_irqrestore(&par->lock_for_chan_update, + irq_flags); } } @@ -1114,6 +1151,7 @@ static int __devinit fb_probe(struct platform_device *device) struct da8xx_fb_par *par; resource_size_t len; int ret, i; + unsigned long ulcm; if (fb_pdata == NULL) { dev_err(&device->dev, "Can not get platform data\n"); @@ -1209,7 +1247,8 @@ static int __devinit fb_probe(struct platform_device *device) /* allocate frame buffer */ par->vram_size = lcdc_info->width * lcdc_info->height * 
lcd_cfg->bpp; - par->vram_size = PAGE_ALIGN(par->vram_size/8); + ulcm = lcm((lcdc_info->width * lcd_cfg->bpp)/8, PAGE_SIZE); + par->vram_size = roundup(par->vram_size/8, ulcm); par->vram_size = par->vram_size * LCD_NUM_BUFFERS; par->vram_virt = dma_alloc_coherent(NULL, @@ -1296,6 +1335,8 @@ static int __devinit fb_probe(struct platform_device *device) /* initialize the vsync wait queue */ init_waitqueue_head(&par->vsync_wait); par->vsync_timeout = HZ / 5; + par->which_dma_channel_done = -1; + spin_lock_init(&par->lock_for_chan_update); /* Register the Frame Buffer */ if (register_framebuffer(da8xx_fb_info) < 0) { @@ -1382,11 +1423,12 @@ static int fb_resume(struct platform_device *dev) struct da8xx_fb_par *par = info->par; console_lock(); + clk_enable(par->lcdc_clk); + lcd_enable_raster(); + if (par->panel_power_ctrl) par->panel_power_ctrl(1); - clk_enable(par->lcdc_clk); - lcd_enable_raster(); fb_set_suspend(info, 0); console_unlock(); diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c index a268cbf1cbea..68b9b511ce80 100644 --- a/drivers/video/epson1355fb.c +++ b/drivers/video/epson1355fb.c @@ -477,11 +477,11 @@ static __init unsigned int get_fb_size(struct fb_info *info) return size; } -static int epson1355_width_tab[2][4] __initdata = +static int epson1355_width_tab[2][4] __devinitdata = { {4, 8, 16, -1}, {9, 12, 16, -1} }; -static int epson1355_bpp_tab[8] __initdata = { 1, 2, 4, 8, 15, 16 }; +static int epson1355_bpp_tab[8] __devinitdata = { 1, 2, 4, 8, 15, 16 }; -static void __init fetch_hw_state(struct fb_info *info, struct epson1355_par *par) +static void __devinit fetch_hw_state(struct fb_info *info, struct epson1355_par *par) { struct fb_var_screeninfo *var = &info->var; struct fb_fix_screeninfo *fix = &info->fix; @@ -601,7 +601,7 @@ static int epson1355fb_remove(struct platform_device *dev) return 0; } -int __devinit epson1355fb_probe(struct platform_device *dev) +static int __devinit epson1355fb_probe(struct platform_device *dev) { struct epson1355_par *default_par; struct fb_info *info; diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c index a36b2d28280e..c6c016a506ce 100644 --- a/drivers/video/exynos/exynos_dp_core.c +++ b/drivers/video/exynos/exynos_dp_core.c @@ -47,7 +47,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) exynos_dp_init_hpd(dp); - udelay(200); + usleep_range(200, 210); while (exynos_dp_get_plug_in_status(dp) != 0) { timeout_loop++; @@ -55,7 +55,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) dev_err(dp->dev, "failed to get hpd plug status\n"); return -ETIMEDOUT; } - udelay(10); + usleep_range(10, 11); } return 0; @@ -304,7 +304,7 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp) buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 | DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0; exynos_dp_write_bytes_to_dpcd(dp, - DPCD_ADDR_TRAINING_PATTERN_SET, + DPCD_ADDR_TRAINING_LANE0_SET, lane_count, buf); } @@ -336,7 +336,7 @@ static int exynos_dp_channel_eq_ok(u8 link_status[6], int lane_count) u8 lane_status; lane_align = link_status[2]; - if ((lane_align == DPCD_INTERLANE_ALIGN_DONE) == 0) + if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0) return -EINVAL; for (lane = 0; lane < lane_count; lane++) { @@ -407,6 +407,9 @@ static unsigned int exynos_dp_get_lane_link_training( case 3: reg = exynos_dp_get_lane3_link_training(dp); break; + default: + WARN_ON(1); + return 0; } return reg; @@ -483,7 +486,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) u8 
pre_emphasis; u8 training_lane; - udelay(100); + usleep_range(100, 101); exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS, 6, link_status); @@ -501,7 +504,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) buf[0] = DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_2; exynos_dp_write_byte_to_dpcd(dp, - DPCD_ADDR_TRAINING_LANE0_SET, + DPCD_ADDR_TRAINING_PATTERN_SET, buf[0]); for (lane = 0; lane < lane_count; lane++) { @@ -568,7 +571,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) u8 adjust_request[2]; - udelay(400); + usleep_range(400, 401); exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS, 6, link_status); @@ -736,7 +739,7 @@ static int exynos_dp_set_link_train(struct exynos_dp_device *dp, if (retval == 0) break; - udelay(100); + usleep_range(100, 110); } return retval; @@ -770,7 +773,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - udelay(1); + usleep_range(1, 2); } /* Set to use the register calculated M/N video */ @@ -804,7 +807,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - mdelay(1); + usleep_range(1000, 1001); } if (retval != 0) diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h index 1e0f998e0c9f..8526e548c385 100644 --- a/drivers/video/exynos/exynos_dp_core.h +++ b/drivers/video/exynos/exynos_dp_core.h @@ -85,10 +85,6 @@ void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); -void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); -void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); -void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); -void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable); void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, enum pattern_set pattern); diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c index bcb0e3ae1e9d..2db5b9aa250a 100644 --- a/drivers/video/exynos/exynos_dp_reg.c +++ b/drivers/video/exynos/exynos_dp_reg.c @@ -122,7 +122,7 @@ void exynos_dp_reset(struct exynos_dp_device *dp) LS_CLK_DOMAIN_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); - udelay(20); + usleep_range(20, 30); exynos_dp_lane_swap(dp, 0); @@ -988,7 +988,7 @@ void exynos_dp_reset_macro(struct exynos_dp_device *dp) writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); /* 10 us is the minimum reset time. */ - udelay(10); + usleep_range(10, 20); reg &= ~MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c index 9908e75ae761..4bc2b8a5dd8b 100644 --- a/drivers/video/exynos/exynos_mipi_dsi.c +++ b/drivers/video/exynos/exynos_mipi_dsi.c @@ -154,7 +154,7 @@ static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power) if (client_drv && client_drv->power_on) client_drv->power_on(client_dev, 1); - exynos_mipi_regulator_disable(dsim); + exynos_mipi_regulator_enable(dsim); /* enable MIPI-DSI PHY. 
*/ if (dsim->pd->phy_enable) diff --git a/drivers/video/exynos/s6e8ax0.h b/drivers/video/exynos/s6e8ax0.h deleted file mode 100644 index 1f1b270484b0..000000000000 --- a/drivers/video/exynos/s6e8ax0.h +++ /dev/null @@ -1,21 +0,0 @@ -/* linux/drivers/video/backlight/s6e8ax0.h - * - * MIPI-DSI based s6e8ax0 AMOLED LCD Panel definitions. - * - * Copyright (c) 2011 Samsung Electronics - * - * Inki Dae, <inki.dae@samsung.com> - * Donghwa Lee <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef _S6E8AX0_H -#define _S6E8AX0_H - -extern void s6e8ax0_init(void); - -#endif - diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 1ddeb11659d4..64cda560c488 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -104,6 +104,8 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, deferred framebuffer IO. then if userspace touches a page again, we repeat the same scheme */ + file_update_time(vma->vm_file); + /* protect against the workqueue changing the page list */ mutex_lock(&fbdefio->lock); diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h index 04c01faaf772..624ee115f129 100644 --- a/drivers/video/fb_draw.h +++ b/drivers/video/fb_draw.h @@ -3,6 +3,7 @@ #include <asm/types.h> #include <linux/fb.h> +#include <linux/bug.h> /* * Compose two values, using a bitmask as decision value @@ -41,7 +42,8 @@ pixel_to_pat( u32 bpp, u32 pixel) case 32: return 0x0000000100000001ul*pixel; default: - panic("pixel_to_pat(): unsupported pixelformat\n"); + WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); + return 0; } } #else @@ -66,7 +68,8 @@ pixel_to_pat( u32 bpp, u32 pixel) case 32: return 0x00000001ul*pixel; default: - panic("pixel_to_pat(): unsupported pixelformat\n"); + WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); + return 0; } } #endif diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c index da066c210923..5245f9a71892 100644 --- a/drivers/video/grvga.c +++ b/drivers/video/grvga.c @@ -354,7 +354,7 @@ static int __devinit grvga_probe(struct platform_device *dev) */ if (fb_get_options("grvga", &options)) { retval = -ENODEV; - goto err; + goto free_fb; } if (!options || !*options) @@ -370,7 +370,7 @@ static int __devinit grvga_probe(struct platform_device *dev) if (grvga_parse_custom(this_opt, &info->var) < 0) { dev_err(&dev->dev, "Failed to parse custom mode (%s).\n", this_opt); retval = -EINVAL; - goto err1; + goto free_fb; } } else if (!strncmp(this_opt, "addr", 4)) grvga_fix_addr = simple_strtoul(this_opt + 5, NULL, 16); @@ -387,10 +387,11 @@ static int __devinit grvga_probe(struct platform_device *dev) info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN; info->fix.smem_len = grvga_mem_size; - if (!request_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]), "grlib-svgactrl regs")) { + if (!devm_request_mem_region(&dev->dev, dev->resource[0].start, + resource_size(&dev->resource[0]), "grlib-svgactrl regs")) { dev_err(&dev->dev, "registers already mapped\n"); retval = -EBUSY; - goto err; + goto free_fb; } par->regs = of_ioremap(&dev->resource[0], 0, @@ -400,14 +401,14 @@ static int __devinit grvga_probe(struct platform_device *dev) if (!par->regs) { dev_err(&dev->dev, "failed to map registers\n"); retval = -ENOMEM; - goto err1; + goto free_fb; } retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { 
dev_err(&dev->dev, "failed to allocate mem with fb_alloc_cmap\n"); retval = -ENOMEM; - goto err2; + goto unmap_regs; } if (mode_opt) { @@ -415,7 +416,7 @@ static int __devinit grvga_probe(struct platform_device *dev) grvga_modedb, sizeof(grvga_modedb), &grvga_modedb[0], 8); if (!retval || retval == 4) { retval = -EINVAL; - goto err3; + goto dealloc_cmap; } } @@ -427,10 +428,11 @@ static int __devinit grvga_probe(struct platform_device *dev) physical_start = grvga_fix_addr; - if (!request_mem_region(physical_start, grvga_mem_size, dev->name)) { + if (!devm_request_mem_region(&dev->dev, physical_start, + grvga_mem_size, dev->name)) { dev_err(&dev->dev, "failed to request memory region\n"); retval = -ENOMEM; - goto err3; + goto dealloc_cmap; } virtual_start = (unsigned long) ioremap(physical_start, grvga_mem_size); @@ -438,7 +440,7 @@ static int __devinit grvga_probe(struct platform_device *dev) if (!virtual_start) { dev_err(&dev->dev, "error mapping framebuffer memory\n"); retval = -ENOMEM; - goto err4; + goto dealloc_cmap; } } else { /* Allocate frambuffer memory */ @@ -451,7 +453,7 @@ static int __devinit grvga_probe(struct platform_device *dev) "unable to allocate framebuffer memory (%lu bytes)\n", grvga_mem_size); retval = -ENOMEM; - goto err3; + goto dealloc_cmap; } physical_start = dma_map_single(&dev->dev, (void *)virtual_start, grvga_mem_size, DMA_TO_DEVICE); @@ -484,7 +486,7 @@ static int __devinit grvga_probe(struct platform_device *dev) retval = register_framebuffer(info); if (retval < 0) { dev_err(&dev->dev, "failed to register framebuffer\n"); - goto err4; + goto free_mem; } __raw_writel(physical_start, &par->regs->fb_pos); @@ -493,21 +495,18 @@ static int __devinit grvga_probe(struct platform_device *dev) return 0; -err4: +free_mem: dev_set_drvdata(&dev->dev, NULL); - if (grvga_fix_addr) { - release_mem_region(physical_start, grvga_mem_size); + if (grvga_fix_addr) iounmap((void *)virtual_start); - } else + else kfree((void *)virtual_start); -err3: +dealloc_cmap: fb_dealloc_cmap(&info->cmap); -err2: +unmap_regs: of_iounmap(&dev->resource[0], par->regs, resource_size(&dev->resource[0])); -err1: - release_mem_region(dev->resource[0].start, resource_size(&dev->resource[0])); -err: +free_fb: framebuffer_release(info); return retval; @@ -524,12 +523,10 @@ static int __devexit grvga_remove(struct platform_device *device) of_iounmap(&device->resource[0], par->regs, resource_size(&device->resource[0])); - release_mem_region(device->resource[0].start, resource_size(&device->resource[0])); - if (!par->fb_alloced) { - release_mem_region(info->fix.smem_start, info->fix.smem_len); + if (!par->fb_alloced) iounmap(info->screen_base); - } else + else kfree((void *)info->screen_base); framebuffer_release(info); diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index eec0d7b748eb..c89f8a8d36d2 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -269,7 +269,7 @@ struct mx3fb_info { dma_cookie_t cookie; struct scatterlist sg[2]; - u32 sync; /* preserve var->sync flags */ + struct fb_var_screeninfo cur_var; /* current var info */ }; static void mx3fb_dma_done(void *); @@ -698,9 +698,29 @@ static void mx3fb_dma_done(void *arg) complete(&mx3_fbi->flip_cmpl); } +static bool mx3fb_must_set_par(struct fb_info *fbi) +{ + struct mx3fb_info *mx3_fbi = fbi->par; + struct fb_var_screeninfo old_var = mx3_fbi->cur_var; + struct fb_var_screeninfo new_var = fbi->var; + + if ((fbi->var.activate & FB_ACTIVATE_FORCE) && + (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) + return 
true; + + /* + * Ignore xoffset and yoffset update, + * because pan display handles this case. + */ + old_var.xoffset = new_var.xoffset; + old_var.yoffset = new_var.yoffset; + + return !!memcmp(&old_var, &new_var, sizeof(struct fb_var_screeninfo)); +} + static int __set_par(struct fb_info *fbi, bool lock) { - u32 mem_len; + u32 mem_len, cur_xoffset, cur_yoffset; struct ipu_di_signal_cfg sig_cfg; enum ipu_panel mode = IPU_PANEL_TFT; struct mx3fb_info *mx3_fbi = fbi->par; @@ -780,8 +800,25 @@ static int __set_par(struct fb_info *fbi, bool lock) video->out_height = fbi->var.yres; video->out_stride = fbi->var.xres_virtual; - if (mx3_fbi->blank == FB_BLANK_UNBLANK) + if (mx3_fbi->blank == FB_BLANK_UNBLANK) { sdc_enable_channel(mx3_fbi); + /* + * sg[0] points to fb smem_start address + * and is actually active in controller. + */ + mx3_fbi->cur_var.xoffset = 0; + mx3_fbi->cur_var.yoffset = 0; + } + + /* + * Preserve xoffset and yoffest in case they are + * inactive in controller as fb is blanked. + */ + cur_xoffset = mx3_fbi->cur_var.xoffset; + cur_yoffset = mx3_fbi->cur_var.yoffset; + mx3_fbi->cur_var = fbi->var; + mx3_fbi->cur_var.xoffset = cur_xoffset; + mx3_fbi->cur_var.yoffset = cur_yoffset; return 0; } @@ -802,7 +839,7 @@ static int mx3fb_set_par(struct fb_info *fbi) mutex_lock(&mx3_fbi->mutex); - ret = __set_par(fbi, true); + ret = mx3fb_must_set_par(fbi) ? __set_par(fbi, true) : 0; mutex_unlock(&mx3_fbi->mutex); @@ -901,8 +938,8 @@ static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) var->grayscale = 0; /* Preserve sync flags */ - var->sync |= mx3_fbi->sync; - mx3_fbi->sync |= var->sync; + var->sync |= mx3_fbi->cur_var.sync; + mx3_fbi->cur_var.sync |= var->sync; return 0; } @@ -1043,8 +1080,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, return -EINVAL; } - if (fbi->var.xoffset == var->xoffset && - fbi->var.yoffset == var->yoffset) + if (mx3_fbi->cur_var.xoffset == var->xoffset && + mx3_fbi->cur_var.yoffset == var->yoffset) return 0; /* No change, do nothing */ y_bottom = var->yoffset; @@ -1127,6 +1164,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, else fbi->var.vmode &= ~FB_VMODE_YWRAP; + mx3_fbi->cur_var = fbi->var; + mutex_unlock(&mx3_fbi->mutex); dev_dbg(fbi->device, "Update complete\n"); diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c index ad741c3d1ae1..eaeed4340e04 100644 --- a/drivers/video/omap2/displays/panel-acx565akm.c +++ b/drivers/video/omap2/displays/panel-acx565akm.c @@ -487,6 +487,13 @@ static struct omap_video_timings acx_panel_timings = { .vfp = 3, .vsw = 3, .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int acx_panel_probe(struct omap_dss_device *dssdev) @@ -498,8 +505,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev) struct backlight_properties props; dev_dbg(&dssdev->dev, "%s\n", __func__); - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; + /* FIXME AC bias ? 
*/ dssdev->panel.timings = acx_panel_timings; diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c index e42f9dc22123..bc5af2500eb9 100644 --- a/drivers/video/omap2/displays/panel-generic-dpi.c +++ b/drivers/video/omap2/displays/panel-generic-dpi.c @@ -40,12 +40,6 @@ struct panel_config { struct omap_video_timings timings; - int acbi; /* ac-bias pin transitions per interrupt */ - /* Unit: line clocks */ - int acb; /* ac-bias pin frequency */ - - enum omap_panel_config config; - int power_on_delay; int power_off_delay; @@ -73,11 +67,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 11, .vfp = 3, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, .power_on_delay = 50, .power_off_delay = 100, .name = "sharp_lq", @@ -98,11 +94,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x28, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 50, .power_off_delay = 100, .name = "sharp_ls", @@ -123,12 +121,13 @@ static struct panel_config generic_dpi_panels[] = { .vfp = 4, .vsw = 2, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC | - OMAP_DSS_LCD_ONOFF, .power_on_delay = 0, .power_off_delay = 0, .name = "toppoly_tdo35s", @@ -149,11 +148,13 @@ static struct panel_config generic_dpi_panels[] = { .vfp = 4, .vsw = 10, .vbp = 12 - 10, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "samsung_lte430wq_f0c", @@ -174,11 +175,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 4, .vbp = 11, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "seiko_70wvw1tz3", @@ -199,11 +202,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 2, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = 
OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, .power_on_delay = 0, .power_off_delay = 0, .name = "powertip_ph480272t", @@ -224,11 +229,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 3, .vfp = 12, .vbp = 25, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x28, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "innolux_at070tn83", @@ -249,9 +256,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 2, .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .name = "nec_nl2432dr22-11b", }, @@ -270,9 +281,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 1, .vbp = 1, - }, - .config = OMAP_DSS_LCD_TFT, + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "h4", }, @@ -291,10 +306,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 2, .vbp = 2, - }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "apollon", }, /* FocalTech ETM070003DH6 */ @@ -312,9 +330,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 3, .vfp = 13, .vbp = 29, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .name = "focaltech_etm070003dh6", }, @@ -333,11 +355,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 23, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .power_on_delay = 0, .power_off_delay = 0, .name = "microtips_umsh_8173md", @@ -358,9 +382,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 4, .vbp = 2, - }, - .config = OMAP_DSS_LCD_TFT, + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "ortustech_com43h4m10xtc", }, @@ -379,11 +407,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 12, .vbp = 23, - }, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, + 
.vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "innolux_at080tn52", }, @@ -401,8 +431,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 26, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT, .name = "mitsubishi_aa084sb01", }, /* EDT ET0500G0DH6 */ @@ -419,8 +454,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 35, .vbp = 10, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT, .name = "edt_et0500g0dh6", }, @@ -439,9 +479,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 10, .vbp = 33, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pd050vl1", }, @@ -460,9 +504,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 10, .vbp = 33, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pm070wl4", }, @@ -481,9 +529,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 4, .vfp = 1, .vbp = 23, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pd104slf", }, }; @@ -573,10 +625,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev) if (!panel_config) return -EINVAL; - dssdev->panel.config = panel_config->config; dssdev->panel.timings = panel_config->timings; - dssdev->panel.acb = panel_config->acb; - dssdev->panel.acbi = panel_config->acbi; drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); if (!drv_data) diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c index 0841cc2b3f77..802807798846 100644 --- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c @@ -40,6 +40,12 @@ static struct omap_video_timings lb035q02_timings = { .vsw = 2, .vfp = 4, .vbp = 18, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int lb035q02_panel_power_on(struct omap_dss_device *dssdev) @@ -82,8 +88,6 @@ static int 
lb035q02_panel_probe(struct omap_dss_device *dssdev) struct lb035q02_data *ld; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; dssdev->panel.timings = lb035q02_timings; ld = kzalloc(sizeof(*ld), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c index 4a34cdc1371b..e6c115373c00 100644 --- a/drivers/video/omap2/displays/panel-n8x0.c +++ b/drivers/video/omap2/displays/panel-n8x0.c @@ -473,7 +473,6 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev) mutex_init(&ddata->lock); - dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings.x_res = 800; dssdev->panel.timings.y_res = 480; dssdev->ctrl.pixel_size = 16; diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c index 8b38b39213f4..b122b0f31c43 100644 --- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c +++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c @@ -76,6 +76,12 @@ static struct omap_video_timings nec_8048_panel_timings = { .vfp = 3, .vsw = 1, .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, }; static int nec_8048_bl_update_status(struct backlight_device *bl) @@ -116,9 +122,6 @@ static int nec_8048_panel_probe(struct omap_dss_device *dssdev) struct backlight_properties props; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF | - OMAP_DSS_LCD_ONOFF; dssdev->panel.timings = nec_8048_panel_timings; necd = kzalloc(sizeof(*necd), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c index 98ebdaddab5a..2d35bd388860 100644 --- a/drivers/video/omap2/displays/panel-picodlp.c +++ b/drivers/video/omap2/displays/panel-picodlp.c @@ -69,6 +69,12 @@ static struct omap_video_timings pico_ls_timings = { .vsw = 2, .vfp = 3, .vbp = 14, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; static inline struct picodlp_panel_data @@ -414,9 +420,6 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev) struct i2c_client *picodlp_i2c_client; int r = 0, picodlp_adapter_id; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_ONOFF | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IVS; - dssdev->panel.acb = 0x0; dssdev->panel.timings = pico_ls_timings; picod = kzalloc(sizeof(struct picodlp_data), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c index ba38b3ad17d6..bd86ba9ccf76 100644 --- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c @@ -44,6 +44,12 @@ static struct omap_video_timings sharp_ls_timings = { .vsw = 1, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int sharp_ls_bl_update_status(struct backlight_device *bl) @@ -86,9 +92,6 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev) struct 
sharp_data *sd; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; - dssdev->panel.acb = 0x28; dssdev->panel.timings = sharp_ls_timings; sd = kzalloc(sizeof(*sd), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 901576eb5a84..3f5acc7771da 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -882,7 +882,6 @@ static int taal_probe(struct omap_dss_device *dssdev) goto err; } - dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings = panel_config->timings; dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c index bff306e041ca..40cc0cfa5d17 100644 --- a/drivers/video/omap2/displays/panel-tfp410.c +++ b/drivers/video/omap2/displays/panel-tfp410.c @@ -39,6 +39,12 @@ static const struct omap_video_timings tfp410_default_timings = { .vfp = 3, .vsw = 4, .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; struct panel_drv_data { @@ -95,7 +101,6 @@ static int tfp410_probe(struct omap_dss_device *dssdev) return -ENOMEM; dssdev->panel.timings = tfp410_default_timings; - dssdev->panel.config = OMAP_DSS_LCD_TFT; ddata->dssdev = dssdev; mutex_init(&ddata->lock); diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index 4b6448b3c31f..fa7baa650ae0 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c @@ -267,6 +267,12 @@ static const struct omap_video_timings tpo_td043_timings = { .vsw = 1, .vfp = 39, .vbp = 34, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043) @@ -423,8 +429,6 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev) return -ENODEV; } - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IHS | - OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IPC; dssdev->panel.timings = tpo_td043_timings; dssdev->ctrl.pixel_size = 24; diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 43324e5ed25f..b337a8469fd8 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -52,7 +52,7 @@ config OMAP2_DSS_RFBI DBI is a bus between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DBI spesifications. + See http://www.mipi.org/ for DBI specifications. config OMAP2_DSS_VENC bool "VENC support" @@ -92,7 +92,7 @@ config OMAP2_DSS_DSI DSI is a high speed half-duplex serial interface between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DSI spesifications. + See http://www.mipi.org/ for DSI specifications. 
config OMAP2_DSS_MIN_FCK_PER_PCK int "Minimum FCK/PCK ratio (for scaling)" diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c index ab22cc224f3e..0fefc68372b9 100644 --- a/drivers/video/omap2/dss/apply.c +++ b/drivers/video/omap2/dss/apply.c @@ -104,6 +104,7 @@ struct mgr_priv_data { bool shadow_extra_info_dirty; struct omap_video_timings timings; + struct dss_lcd_mgr_config lcd_config; }; static struct { @@ -137,6 +138,7 @@ static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr) void dss_apply_init(void) { const int num_ovls = dss_feat_get_num_ovls(); + struct mgr_priv_data *mp; int i; spin_lock_init(&data_lock); @@ -168,16 +170,35 @@ void dss_apply_init(void) op->user_info = op->info; } + + /* + * Initialize some of the lcd_config fields for TV manager, this lets + * us prevent checking if the manager is LCD or TV at some places + */ + mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT]; + + mp->lcd_config.video_port_width = 24; + mp->lcd_config.clock_info.lck_div = 1; + mp->lcd_config.clock_info.pck_div = 1; } +/* + * A LCD manager's stallmode decides whether it is in manual or auto update. TV + * manager is always auto update, stallmode field for TV manager is false by + * default + */ static bool ovl_manual_update(struct omap_overlay *ovl) { - return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + struct mgr_priv_data *mp = get_mgr_priv(ovl->manager); + + return mp->lcd_config.stallmode; } static bool mgr_manual_update(struct omap_overlay_manager *mgr) { - return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + return mp->lcd_config.stallmode; } static int dss_check_settings_low(struct omap_overlay_manager *mgr, @@ -214,7 +235,7 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr, ois[ovl->id] = oi; } - return dss_mgr_check(mgr, mi, &mp->timings, ois); + return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois); } /* @@ -537,7 +558,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); struct omap_overlay_info *oi; - bool ilace, replication; + bool replication; struct mgr_priv_data *mp; int r; @@ -550,11 +571,9 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) mp = get_mgr_priv(ovl->manager); - replication = dss_use_replication(ovl->manager->device, oi->color_mode); - - ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; + replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode); - r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings); + r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings); if (r) { /* * We can't do much here, as this function can be called from @@ -635,6 +654,24 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr) dispc_mgr_set_timings(mgr->id, &mp->timings); + /* lcd_config parameters */ + if (dss_mgr_is_lcd(mgr->id)) { + dispc_mgr_set_io_pad_mode(mp->lcd_config.io_pad_mode); + + dispc_mgr_enable_stallmode(mgr->id, mp->lcd_config.stallmode); + dispc_mgr_enable_fifohandcheck(mgr->id, + mp->lcd_config.fifohandcheck); + + dispc_mgr_set_clock_div(mgr->id, &mp->lcd_config.clock_info); + + dispc_mgr_set_tft_data_lines(mgr->id, + mp->lcd_config.video_port_width); + + dispc_lcd_enable_signal_polarity(mp->lcd_config.lcden_sig_polarity); + + dispc_mgr_set_lcd_type_tft(mgr->id); + } + mp->extra_info_dirty = false; if (mp->updating) mp->shadow_extra_info_dirty = true; @@ -1294,6 +1331,44 @@ void 
dss_mgr_set_timings(struct omap_overlay_manager *mgr, mutex_unlock(&apply_lock); } +static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mp->lcd_config = *config; + mp->extra_info_dirty = true; +} + +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + unsigned long flags; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mutex_lock(&apply_lock); + + if (mp->enabled) { + DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n", + mgr->name); + goto out; + } + + spin_lock_irqsave(&data_lock, flags); + + dss_apply_mgr_lcd_config(mgr, config); + + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + wait_pending_extra_info_updates(); + +out: + mutex_unlock(&apply_lock); +} + int dss_ovl_set_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 397d4eee11bb..5b289c5f695b 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -119,6 +119,97 @@ enum omap_color_component { DISPC_COLOR_COMPONENT_UV = 1 << 1, }; +enum mgr_reg_fields { + DISPC_MGR_FLD_ENABLE, + DISPC_MGR_FLD_STNTFT, + DISPC_MGR_FLD_GO, + DISPC_MGR_FLD_TFTDATALINES, + DISPC_MGR_FLD_STALLMODE, + DISPC_MGR_FLD_TCKENABLE, + DISPC_MGR_FLD_TCKSELECTION, + DISPC_MGR_FLD_CPR, + DISPC_MGR_FLD_FIFOHANDCHECK, + /* used to maintain a count of the above fields */ + DISPC_MGR_FLD_NUM, +}; + +static const struct { + const char *name; + u32 vsync_irq; + u32 framedone_irq; + u32 sync_lost_irq; + struct reg_field reg_desc[DISPC_MGR_FLD_NUM]; +} mgr_desc[] = { + [OMAP_DSS_CHANNEL_LCD] = { + .name = "LCD", + .vsync_irq = DISPC_IRQ_VSYNC, + .framedone_irq = DISPC_IRQ_FRAMEDONE, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_DIGIT] = { + .name = "DIGIT", + .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, + .framedone_irq = 0, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, + [DISPC_MGR_FLD_STNTFT] = { }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 6, 6 }, + [DISPC_MGR_FLD_TFTDATALINES] = { }, + [DISPC_MGR_FLD_STALLMODE] = { }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 12, 12 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 13, 13 }, + [DISPC_MGR_FLD_CPR] = { }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD2] = { + .name = "LCD2", + .vsync_irq = DISPC_IRQ_VSYNC2, + .framedone_irq = DISPC_IRQ_FRAMEDONE2, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL2, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL2, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL2, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { 
DISPC_CONFIG2, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG2, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG2, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG2, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD3] = { + .name = "LCD3", + .vsync_irq = DISPC_IRQ_VSYNC3, + .framedone_irq = DISPC_IRQ_FRAMEDONE3, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL3, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL3, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL3, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG3, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG3, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG3, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG3, 16, 16 }, + }, + }, +}; + static void _omap_dispc_set_irqs(void); static inline void dispc_write_reg(const u16 idx, u32 val) @@ -131,6 +222,18 @@ static inline u32 dispc_read_reg(const u16 idx) return __raw_readl(dispc.base + idx); } +static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld) +{ + const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + return REG_GET(rfld.reg, rfld.high, rfld.low); +} + +static void mgr_fld_write(enum omap_channel channel, + enum mgr_reg_fields regfld, int val) { + const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low); +} + #define SR(reg) \ dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) #define RR(reg) \ @@ -153,6 +256,10 @@ static void dispc_save_context(void) SR(CONTROL2); SR(CONFIG2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + SR(CONTROL3); + SR(CONFIG3); + } for (i = 0; i < dss_feat_get_num_mgrs(); i++) { SR(DEFAULT_COLOR(i)); @@ -266,6 +373,8 @@ static void dispc_restore_context(void) RR(GLOBAL_ALPHA); if (dss_has_feature(FEAT_MGR_LCD2)) RR(CONFIG2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONFIG3); for (i = 0; i < dss_feat_get_num_mgrs(); i++) { RR(DEFAULT_COLOR(i)); @@ -351,6 +460,8 @@ static void dispc_restore_context(void) RR(CONTROL); if (dss_has_feature(FEAT_MGR_LCD2)) RR(CONTROL2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONTROL3); /* clear spurious SYNC_LOST_DIGIT interrupts */ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT); @@ -387,101 +498,41 @@ void dispc_runtime_put(void) WARN_ON(r < 0 && r != -ENOSYS); } -static inline bool dispc_mgr_is_lcd(enum omap_channel channel) -{ - if (channel == OMAP_DSS_CHANNEL_LCD || - channel == OMAP_DSS_CHANNEL_LCD2) - return true; - else - return false; -} - u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) { - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return DISPC_IRQ_VSYNC; - case OMAP_DSS_CHANNEL_LCD2: - return DISPC_IRQ_VSYNC2; - case OMAP_DSS_CHANNEL_DIGIT: - return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; - default: - BUG(); - return 0; - } + return mgr_desc[channel].vsync_irq; } u32 dispc_mgr_get_framedone_irq(enum omap_channel channel) { - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return DISPC_IRQ_FRAMEDONE; - case OMAP_DSS_CHANNEL_LCD2: - return DISPC_IRQ_FRAMEDONE2; - case OMAP_DSS_CHANNEL_DIGIT: - return 0; - default: - BUG(); - return 0; - } + return mgr_desc[channel].framedone_irq; } bool dispc_mgr_go_busy(enum omap_channel channel) { - int bit; - - if (dispc_mgr_is_lcd(channel)) - bit = 5; /* GOLCD */ - else - bit = 6; /* GODIGIT */ - - if (channel == OMAP_DSS_CHANNEL_LCD2) - 
return REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - return REG_GET(DISPC_CONTROL, bit, bit) == 1; + return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; } void dispc_mgr_go(enum omap_channel channel) { - int bit; bool enable_bit, go_bit; - if (dispc_mgr_is_lcd(channel)) - bit = 0; /* LCDENABLE */ - else - bit = 1; /* DIGITALENABLE */ - /* if the channel is not enabled, we don't need GO */ - if (channel == OMAP_DSS_CHANNEL_LCD2) - enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + enable_bit = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE) == 1; if (!enable_bit) return; - if (dispc_mgr_is_lcd(channel)) - bit = 5; /* GOLCD */ - else - bit = 6; /* GODIGIT */ - - if (channel == OMAP_DSS_CHANNEL_LCD2) - go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + go_bit = mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; if (go_bit) { DSSERR("GO bit not down for channel %d\n", channel); return; } - DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : - (channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT")); + DSSDBG("GO %s\n", mgr_desc[channel].name); - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); - else - REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); + mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1); } static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value) @@ -832,6 +883,15 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) chan = 0; chan2 = 1; break; + case OMAP_DSS_CHANNEL_LCD3: + if (dss_has_feature(FEAT_MGR_LCD3)) { + chan = 0; + chan2 = 2; + } else { + BUG(); + return; + } + break; default: BUG(); return; @@ -867,7 +927,14 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane) val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); - if (dss_has_feature(FEAT_MGR_LCD2)) { + if (dss_has_feature(FEAT_MGR_LCD3)) { + if (FLD_GET(val, 31, 30) == 0) + channel = FLD_GET(val, shift, shift); + else if (FLD_GET(val, 31, 30) == 1) + channel = OMAP_DSS_CHANNEL_LCD2; + else + channel = OMAP_DSS_CHANNEL_LCD3; + } else if (dss_has_feature(FEAT_MGR_LCD2)) { if (FLD_GET(val, 31, 30) == 0) channel = FLD_GET(val, shift, shift); else @@ -922,16 +989,10 @@ void dispc_enable_gamma_table(bool enable) static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) { - u16 reg; - - if (channel == OMAP_DSS_CHANNEL_LCD) - reg = DISPC_CONFIG; - else if (channel == OMAP_DSS_CHANNEL_LCD2) - reg = DISPC_CONFIG2; - else + if (channel == OMAP_DSS_CHANNEL_DIGIT) return; - REG_FLD_MOD(reg, enable, 15, 15); + mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable); } static void dispc_mgr_set_cpr_coef(enum omap_channel channel, @@ -939,7 +1000,7 @@ static void dispc_mgr_set_cpr_coef(enum omap_channel channel, { u32 coef_r, coef_g, coef_b; - if (!dispc_mgr_is_lcd(channel)) + if (!dss_mgr_is_lcd(channel)) return; coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) | @@ -1798,7 +1859,7 @@ static int check_horiz_timing_omap3(enum omap_channel channel, nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width; pclk = dispc_mgr_pclk_rate(channel); - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) lclk = dispc_mgr_lclk_rate(channel); else lclk = dispc_fclk_rate(); @@ -2086,8 +2147,7 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane, } int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication, - const struct omap_video_timings 
*mgr_timings) + bool replication, const struct omap_video_timings *mgr_timings) { struct omap_overlay *ovl = omap_dss_get_overlay(plane); bool five_taps = true; @@ -2103,6 +2163,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, u16 out_width, out_height; enum omap_channel channel; int x_predecim = 1, y_predecim = 1; + bool ilace = mgr_timings->interlace; channel = dispc_ovl_get_channel_out(plane); @@ -2254,14 +2315,9 @@ static void dispc_disable_isr(void *data, u32 mask) static void _enable_lcd_out(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) { - REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0); - /* flush posted write */ - dispc_read_reg(DISPC_CONTROL2); - } else { - REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0); - dispc_read_reg(DISPC_CONTROL); - } + mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); + /* flush posted write */ + mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); } static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable) @@ -2274,12 +2330,9 @@ static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable) /* When we disable LCD output, we need to wait until frame is done. * Otherwise the DSS is still working, and turning off the clocks * prevents DSS from going to OFF mode */ - is_on = channel == OMAP_DSS_CHANNEL_LCD2 ? - REG_GET(DISPC_CONTROL2, 0, 0) : - REG_GET(DISPC_CONTROL, 0, 0); + is_on = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); - irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 : - DISPC_IRQ_FRAMEDONE; + irq = mgr_desc[channel].framedone_irq; if (!enable && is_on) { init_completion(&frame_done_completion); @@ -2384,21 +2437,12 @@ static void dispc_mgr_enable_digit_out(bool enable) bool dispc_mgr_is_enabled(enum omap_channel channel) { - if (channel == OMAP_DSS_CHANNEL_LCD) - return !!REG_GET(DISPC_CONTROL, 0, 0); - else if (channel == OMAP_DSS_CHANNEL_DIGIT) - return !!REG_GET(DISPC_CONTROL, 1, 1); - else if (channel == OMAP_DSS_CHANNEL_LCD2) - return !!REG_GET(DISPC_CONTROL2, 0, 0); - else { - BUG(); - return false; - } + return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); } void dispc_mgr_enable(enum omap_channel channel, bool enable) { - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) dispc_mgr_enable_lcd_out(channel, enable); else if (channel == OMAP_DSS_CHANNEL_DIGIT) dispc_mgr_enable_digit_out(enable); @@ -2432,36 +2476,13 @@ void dispc_pck_free_enable(bool enable) void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); - else - REG_FLD_MOD(DISPC_CONFIG, enable ? 
1 : 0, 16, 16); + mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable); } -void dispc_mgr_set_lcd_display_type(enum omap_channel channel, - enum omap_lcd_display_type type) +void dispc_mgr_set_lcd_type_tft(enum omap_channel channel) { - int mode; - - switch (type) { - case OMAP_DSS_LCD_DISPLAY_STN: - mode = 0; - break; - - case OMAP_DSS_LCD_DISPLAY_TFT: - mode = 1; - break; - - default: - BUG(); - return; - } - - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); - else - REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); + mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1); } void dispc_set_loadmode(enum omap_dss_load_mode mode) @@ -2479,24 +2500,14 @@ static void dispc_mgr_set_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type type, u32 trans_key) { - if (ch == OMAP_DSS_CHANNEL_LCD) - REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); - else if (ch == OMAP_DSS_CHANNEL_DIGIT) - REG_FLD_MOD(DISPC_CONFIG, type, 13, 13); - else /* OMAP_DSS_CHANNEL_LCD2 */ - REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); + mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type); dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); } static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable) { - if (ch == OMAP_DSS_CHANNEL_LCD) - REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); - else if (ch == OMAP_DSS_CHANNEL_DIGIT) - REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); - else /* OMAP_DSS_CHANNEL_LCD2 */ - REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); + mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable); } static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, @@ -2547,10 +2558,7 @@ void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines) return; } - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); - else - REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); + mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code); } void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) @@ -2584,10 +2592,7 @@ void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, enable, 11, 11); - else - REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11); + mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable); } static bool _dispc_mgr_size_ok(u16 width, u16 height) @@ -2627,7 +2632,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res); - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw, timings->hfp, timings->hbp, timings->vsw, timings->vfp, @@ -2637,9 +2642,16 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, } static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, - int hfp, int hbp, int vsw, int vfp, int vbp) + int hfp, int hbp, int vsw, int vfp, int vbp, + enum omap_dss_signal_level vsync_level, + enum omap_dss_signal_level hsync_level, + enum omap_dss_signal_edge data_pclk_edge, + enum omap_dss_signal_level de_level, + enum omap_dss_signal_edge sync_pclk_edge) + { - u32 timing_h, timing_v; + u32 timing_h, timing_v, l; + bool onoff, rf, ipc; if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) | @@ -2657,6 +2669,44 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, dispc_write_reg(DISPC_TIMING_H(channel), timing_h); dispc_write_reg(DISPC_TIMING_V(channel), timing_v); + + 
switch (data_pclk_edge) { + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + ipc = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + ipc = true; + break; + case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES: + default: + BUG(); + } + + switch (sync_pclk_edge) { + case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES: + onoff = false; + rf = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + onoff = true; + rf = false; + break; + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + onoff = true; + rf = true; + break; + default: + BUG(); + }; + + l = dispc_read_reg(DISPC_POL_FREQ(channel)); + l |= FLD_VAL(onoff, 17, 17); + l |= FLD_VAL(rf, 16, 16); + l |= FLD_VAL(de_level, 15, 15); + l |= FLD_VAL(ipc, 14, 14); + l |= FLD_VAL(hsync_level, 13, 13); + l |= FLD_VAL(vsync_level, 12, 12); + dispc_write_reg(DISPC_POL_FREQ(channel), l); } /* change name to mode? */ @@ -2674,9 +2724,10 @@ void dispc_mgr_set_timings(enum omap_channel channel, return; } - if (dispc_mgr_is_lcd(channel)) { + if (dss_mgr_is_lcd(channel)) { _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw, - t.vfp, t.vbp); + t.vfp, t.vbp, t.vsync_level, t.hsync_level, + t.data_pclk_edge, t.de_level, t.sync_pclk_edge); xtot = t.x_res + t.hfp + t.hsw + t.hbp; ytot = t.y_res + t.vfp + t.vsw + t.vbp; @@ -2687,14 +2738,13 @@ void dispc_mgr_set_timings(enum omap_channel channel, DSSDBG("pck %u\n", timings->pixel_clock); DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp); + DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n", + t.vsync_level, t.hsync_level, t.data_pclk_edge, + t.de_level, t.sync_pclk_edge); DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); } else { - enum dss_hdmi_venc_clk_source_select source; - - source = dss_get_hdmi_venc_clk_source(); - - if (source == DSS_VENC_TV_CLK) + if (t.interlace == true) t.y_res /= 2; } @@ -2780,7 +2830,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) { unsigned long r; - if (dispc_mgr_is_lcd(channel)) { + if (dss_mgr_is_lcd(channel)) { int pcd; u32 l; @@ -2821,12 +2871,32 @@ unsigned long dispc_core_clk_rate(void) return fclk / lcd; } -void dispc_dump_clocks(struct seq_file *s) +static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) { int lcd, pcd; + enum omap_dss_clk_source lcd_clk_src; + + seq_printf(s, "- %s -\n", mgr_desc[channel].name); + + lcd_clk_src = dss_get_lcd_clk_source(channel); + + seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, + dss_get_generic_clk_source_name(lcd_clk_src), + dss_feat_get_clk_source_name(lcd_clk_src)); + + dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + dispc_mgr_lclk_rate(channel), lcd); + seq_printf(s, "pck\t\t%-16lupck div\t%u\n", + dispc_mgr_pclk_rate(channel), pcd); +} + +void dispc_dump_clocks(struct seq_file *s) +{ + int lcd; u32 l; enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); - enum omap_dss_clk_source lcd_clk_src; if (dispc_runtime_get()) return; @@ -2847,36 +2917,13 @@ void dispc_dump_clocks(struct seq_file *s) seq_printf(s, "lck\t\t%-16lulck div\t%u\n", (dispc_fclk_rate()/lcd), lcd); } - seq_printf(s, "- LCD1 -\n"); - - lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD); - - seq_printf(s, "lcd1_clk source = %s (%s)\n", - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); - - dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd); - - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", - 
dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", - dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd); - if (dss_has_feature(FEAT_MGR_LCD2)) { - seq_printf(s, "- LCD2 -\n"); - - lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2); - seq_printf(s, "lcd2_clk source = %s (%s)\n", - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD); - dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd); - - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", - dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", - dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); - } + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2); + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3); dispc_runtime_put(); } @@ -2929,6 +2976,12 @@ void dispc_dump_irqs(struct seq_file *s) PIS(ACBIAS_COUNT_STAT2); PIS(SYNC_LOST2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + PIS(FRAMEDONE3); + PIS(VSYNC3); + PIS(ACBIAS_COUNT_STAT3); + PIS(SYNC_LOST3); + } #undef PIS } #endif @@ -2940,6 +2993,7 @@ static void dispc_dump_regs(struct seq_file *s) [OMAP_DSS_CHANNEL_LCD] = "LCD", [OMAP_DSS_CHANNEL_DIGIT] = "TV", [OMAP_DSS_CHANNEL_LCD2] = "LCD2", + [OMAP_DSS_CHANNEL_LCD3] = "LCD3", }; const char *ovl_names[] = { [OMAP_DSS_GFX] = "GFX", @@ -2972,6 +3026,10 @@ static void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_CONTROL2); DUMPREG(DISPC_CONFIG2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + DUMPREG(DISPC_CONTROL3); + DUMPREG(DISPC_CONFIG3); + } #undef DUMPREG @@ -3093,41 +3151,8 @@ static void dispc_dump_regs(struct seq_file *s) #undef DUMPREG } -static void _dispc_mgr_set_pol_freq(enum omap_channel channel, bool onoff, - bool rf, bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, - u8 acb) -{ - u32 l = 0; - - DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n", - onoff, rf, ieo, ipc, ihs, ivs, acbi, acb); - - l |= FLD_VAL(onoff, 17, 17); - l |= FLD_VAL(rf, 16, 16); - l |= FLD_VAL(ieo, 15, 15); - l |= FLD_VAL(ipc, 14, 14); - l |= FLD_VAL(ihs, 13, 13); - l |= FLD_VAL(ivs, 12, 12); - l |= FLD_VAL(acbi, 11, 8); - l |= FLD_VAL(acb, 7, 0); - - dispc_write_reg(DISPC_POL_FREQ(channel), l); -} - -void dispc_mgr_set_pol_freq(enum omap_channel channel, - enum omap_panel_config config, u8 acbi, u8 acb) -{ - _dispc_mgr_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0, - (config & OMAP_DSS_LCD_RF) != 0, - (config & OMAP_DSS_LCD_IEO) != 0, - (config & OMAP_DSS_LCD_IPC) != 0, - (config & OMAP_DSS_LCD_IHS) != 0, - (config & OMAP_DSS_LCD_IVS) != 0, - acbi, acb); -} - /* with fck as input clock rate, find dispc dividers that produce req_pck */ -void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, +void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo) { u16 pcd_min, pcd_max; @@ -3138,9 +3163,6 @@ void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD); pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD); - if (!is_tft) - pcd_min = 3; - best_pck = 0; best_ld = 0; best_pd = 0; @@ -3192,15 +3214,13 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, return 0; } -int dispc_mgr_set_clock_div(enum omap_channel channel, +void dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo) { DSSDBG("lck = %lu 
(%u)\n", cinfo->lck, cinfo->lck_div); DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div); dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div); - - return 0; } int dispc_mgr_get_clock_div(enum omap_channel channel, @@ -3354,6 +3374,8 @@ static void print_irq_status(u32 status) PIS(SYNC_LOST_DIGIT); if (dss_has_feature(FEAT_MGR_LCD2)) PIS(SYNC_LOST2); + if (dss_has_feature(FEAT_MGR_LCD3)) + PIS(SYNC_LOST3); #undef PIS printk("\n"); @@ -3450,12 +3472,6 @@ static void dispc_error_worker(struct work_struct *work) DISPC_IRQ_VID3_FIFO_UNDERFLOW, }; - static const unsigned sync_lost_bits[] = { - DISPC_IRQ_SYNC_LOST, - DISPC_IRQ_SYNC_LOST_DIGIT, - DISPC_IRQ_SYNC_LOST2, - }; - spin_lock_irqsave(&dispc.irq_lock, flags); errors = dispc.error_irqs; dispc.error_irqs = 0; @@ -3484,7 +3500,7 @@ static void dispc_error_worker(struct work_struct *work) unsigned bit; mgr = omap_dss_get_overlay_manager(i); - bit = sync_lost_bits[i]; + bit = mgr_desc[i].sync_lost_irq; if (bit & errors) { struct omap_dss_device *dssdev = mgr->device; @@ -3603,6 +3619,8 @@ static void _omap_dispc_initialize_irq(void) dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; if (dss_has_feature(FEAT_MGR_LCD2)) dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2; + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST3; if (dss_feat_get_num_ovls() > 3) dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW; diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h index f278080e1063..92d8a9be86fc 100644 --- a/drivers/video/omap2/dss/dispc.h +++ b/drivers/video/omap2/dss/dispc.h @@ -36,6 +36,8 @@ #define DISPC_CONTROL2 0x0238 #define DISPC_CONFIG2 0x0620 #define DISPC_DIVISOR 0x0804 +#define DISPC_CONTROL3 0x0848 +#define DISPC_CONFIG3 0x084C /* DISPC overlay registers */ #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ @@ -118,6 +120,8 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel) return 0x0050; case OMAP_DSS_CHANNEL_LCD2: return 0x03AC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0814; default: BUG(); return 0; @@ -133,6 +137,8 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel) return 0x0058; case OMAP_DSS_CHANNEL_LCD2: return 0x03B0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0818; default: BUG(); return 0; @@ -149,6 +155,8 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0400; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0840; default: BUG(); return 0; @@ -165,6 +173,8 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0404; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0844; default: BUG(); return 0; @@ -181,6 +191,8 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0408; + case OMAP_DSS_CHANNEL_LCD3: + return 0x083C; default: BUG(); return 0; @@ -197,6 +209,8 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x040C; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0838; default: BUG(); return 0; @@ -213,6 +227,8 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel) return 0x0078; case OMAP_DSS_CHANNEL_LCD2: return 0x03CC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0834; default: BUG(); return 0; @@ -229,6 +245,8 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0828; default: BUG(); return 0; @@ -245,6 +263,8 @@ 
static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x082C; default: BUG(); return 0; @@ -261,6 +281,8 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0830; default: BUG(); return 0; @@ -277,6 +299,8 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03BC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0824; default: BUG(); return 0; @@ -293,6 +317,8 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0820; default: BUG(); return 0; @@ -309,6 +335,8 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x081C; default: BUG(); return 0; diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index 249010630370..5bd957e85505 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -116,7 +116,7 @@ static ssize_t display_timings_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - struct omap_video_timings t; + struct omap_video_timings t = dssdev->panel.timings; int r, found; if (!dssdev->driver->set_timings || !dssdev->driver->check_timings) @@ -316,44 +316,6 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev, } EXPORT_SYMBOL(omapdss_default_get_timings); -/* Checks if replication logic should be used. Only use for active matrix, - * when overlay is in RGB12U or RGB16 mode, and LCD interface is - * 18bpp or 24bpp */ -bool dss_use_replication(struct omap_dss_device *dssdev, - enum omap_color_mode mode) -{ - int bpp; - - if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) - return false; - - if (dssdev->type == OMAP_DISPLAY_TYPE_DPI && - (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0) - return false; - - switch (dssdev->type) { - case OMAP_DISPLAY_TYPE_DPI: - bpp = dssdev->phy.dpi.data_lines; - break; - case OMAP_DISPLAY_TYPE_HDMI: - case OMAP_DISPLAY_TYPE_VENC: - case OMAP_DISPLAY_TYPE_SDI: - bpp = 24; - break; - case OMAP_DISPLAY_TYPE_DBI: - bpp = dssdev->ctrl.pixel_size; - break; - case OMAP_DISPLAY_TYPE_DSI: - bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); - break; - default: - BUG(); - return false; - } - - return bpp > 16; -} - void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev) { diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 8c2056c9537b..3266be23fc0d 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -38,6 +38,8 @@ static struct { struct regulator *vdds_dsi_reg; struct platform_device *dsidev; + + struct dss_lcd_mgr_config mgr_config; } dpi; static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk) @@ -64,7 +66,7 @@ static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev) return false; } -static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, +static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -72,8 +74,8 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; 
int r; - r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req, - &dsi_cinfo, &dispc_cinfo); + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo, + &dispc_cinfo); if (r) return r; @@ -83,11 +85,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) { - dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); - return r; - } + dpi.mgr_config.clock_info = dispc_cinfo; *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; *lck_div = dispc_cinfo.lck_div; @@ -96,7 +94,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, return 0; } -static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, +static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -104,7 +102,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; int r; - r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo); + r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo); if (r) return r; @@ -112,9 +110,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, if (r) return r; - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) - return r; + dpi.mgr_config.clock_info = dispc_cinfo; *fck = dss_cinfo.fck; *lck_div = dispc_cinfo.lck_div; @@ -129,20 +125,14 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) int lck_div = 0, pck_div = 0; unsigned long fck = 0; unsigned long pck; - bool is_tft; int r = 0; - dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config, - dssdev->panel.acbi, dssdev->panel.acb); - - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - if (dpi_use_dsi_pll(dssdev)) - r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); else - r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); if (r) return r; @@ -161,19 +151,18 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) return 0; } -static void dpi_basic_init(struct omap_dss_device *dssdev) +static void dpi_config_lcd_manager(struct omap_dss_device *dssdev) { - bool is_tft; + dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + dpi.mgr_config.stallmode = false; + dpi.mgr_config.fifohandcheck = false; - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; + dpi.mgr_config.video_port_width = dssdev->phy.dpi.data_lines; - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS); - dispc_mgr_enable_stallmode(dssdev->manager->id, false); + dpi.mgr_config.lcden_sig_polarity = 0; - dispc_mgr_set_lcd_display_type(dssdev->manager->id, is_tft ? 
- OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); - dispc_mgr_set_tft_data_lines(dssdev->manager->id, - dssdev->phy.dpi.data_lines); + dss_mgr_set_lcd_config(dssdev->manager, &dpi.mgr_config); } int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) @@ -206,8 +195,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - dpi_basic_init(dssdev); - if (dpi_use_dsi_pll(dssdev)) { r = dsi_runtime_get(dpi.dsidev); if (r) @@ -222,6 +209,8 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_set_mode; + dpi_config_lcd_manager(dssdev); + mdelay(2); r = dss_mgr_enable(dssdev->manager); @@ -292,7 +281,6 @@ EXPORT_SYMBOL(dpi_set_timings); int dpi_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - bool is_tft; int r; int lck_div, pck_div; unsigned long fck; @@ -305,11 +293,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev, if (timings->pixel_clock == 0) return -EINVAL; - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - if (dpi_use_dsi_pll(dssdev)) { struct dsi_clock_info dsi_cinfo; - r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, timings->pixel_clock * 1000, &dsi_cinfo, &dispc_cinfo); @@ -319,7 +305,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev, fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; } else { struct dss_clock_info dss_cinfo; - r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000, + r = dss_calc_clock_div(timings->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 14ce8cc079e3..b07e8864f82f 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -331,6 +331,8 @@ struct dsi_data { unsigned num_lanes_used; unsigned scp_clk_refcount; + + struct dss_lcd_mgr_config mgr_config; }; struct dsi_packet_sent_handler_data { @@ -1085,9 +1087,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev, struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); if (enable) - clk_enable(dsi->sys_clk); + clk_prepare_enable(dsi->sys_clk); else - clk_disable(dsi->sys_clk); + clk_disable_unprepare(dsi->sys_clk); if (enable && dsi->pll_locked) { if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) @@ -1316,7 +1318,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev, return 0; } -int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { @@ -1335,8 +1337,8 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, dsi->cache_cinfo.clkin == dss_sys_clk) { DSSDBG("DSI clock info found from cache\n"); *dsi_cinfo = dsi->cache_cinfo; - dispc_find_clk_divs(is_tft, req_pck, - dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); + dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk, + dispc_cinfo); return 0; } @@ -1402,7 +1404,7 @@ retry: match = 1; - dispc_find_clk_divs(is_tft, req_pck, + dispc_find_clk_divs(req_pck, cur.dsi_pll_hsdiv_dispc_clk, &cur_dispc); @@ -3631,17 +3633,14 @@ static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev) static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - int de_pol = dssdev->panel.dsi_vm_data.vp_de_pol; - int hsync_pol = 
dssdev->panel.dsi_vm_data.vp_hsync_pol; - int vsync_pol = dssdev->panel.dsi_vm_data.vp_vsync_pol; bool vsync_end = dssdev->panel.dsi_vm_data.vp_vsync_end; bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end; u32 r; r = dsi_read_reg(dsidev, DSI_CTRL); - r = FLD_MOD(r, de_pol, 9, 9); /* VP_DE_POL */ - r = FLD_MOD(r, hsync_pol, 10, 10); /* VP_HSYNC_POL */ - r = FLD_MOD(r, vsync_pol, 11, 11); /* VP_VSYNC_POL */ + r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */ + r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */ + r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */ r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */ r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ @@ -4340,52 +4339,101 @@ EXPORT_SYMBOL(omap_dsi_update); /* Display funcs */ +static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dispc_clock_info dispc_cinfo; + int r; + unsigned long long fck; + + fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); + + dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; + dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; + + r = dispc_calc_clock_rates(fck, &dispc_cinfo); + if (r) { + DSSERR("Failed to calc dispc clocks\n"); + return r; + } + + dsi->mgr_config.clock_info = dispc_cinfo; + + return 0; +} + static int dsi_display_init_dispc(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_video_timings timings; int r; + u32 irq = 0; if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { u16 dw, dh; - u32 irq; - struct omap_video_timings timings = { - .hsw = 1, - .hfp = 1, - .hbp = 1, - .vsw = 1, - .vfp = 0, - .vbp = 0, - }; dssdev->driver->get_resolution(dssdev, &dw, &dh); + timings.x_res = dw; timings.y_res = dh; + timings.hsw = 1; + timings.hfp = 1; + timings.hbp = 1; + timings.vsw = 1; + timings.vfp = 0; + timings.vbp = 0; - irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev, irq); if (r) { DSSERR("can't get FRAMEDONE irq\n"); - return r; + goto err; } - dispc_mgr_enable_stallmode(dssdev->manager->id, true); - dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1); - - dss_mgr_set_timings(dssdev->manager, &timings); + dsi->mgr_config.stallmode = true; + dsi->mgr_config.fifohandcheck = true; } else { - dispc_mgr_enable_stallmode(dssdev->manager->id, false); - dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0); + timings = dssdev->panel.timings; - dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); + dsi->mgr_config.stallmode = false; + dsi->mgr_config.fifohandcheck = false; } - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); - dispc_mgr_set_tft_data_lines(dssdev->manager->id, - dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt)); + /* + * override interlace, logic level and edge related parameters in + * omap_video_timings with default values + */ + timings.interlace = false; + timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + + dss_mgr_set_timings(dssdev->manager, &timings); + + r = dsi_configure_dispc_clocks(dssdev); + if (r) + goto err1; + + dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + dsi->mgr_config.video_port_width = + dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); + dsi->mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(dssdev->manager, &dsi->mgr_config); + return 0; +err1: + if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) + omap_dispc_unregister_isr(dsi_framedone_irq_callback, + (void *) dssdev, irq); +err: + return r; } static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) @@ -4393,8 +4441,7 @@ static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { u32 irq; - irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev, irq); @@ -4426,33 +4473,6 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) return 0; } -static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) -{ - struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - struct dispc_clock_info dispc_cinfo; - int r; - unsigned long long fck; - - fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); - - dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; - dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; - - r = dispc_calc_clock_rates(fck, &dispc_cinfo); - if (r) { - DSSERR("Failed to calc dispc clocks\n"); - return r; - } - - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) { - DSSERR("Failed to set dispc clocks\n"); - return r; - } - - return 0; -} - static int dsi_display_init_dsi(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -4474,10 +4494,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) DSSDBG("PLL OK\n"); - r = dsi_configure_dispc_clocks(dssdev); - if (r) - goto err2; - r = dsi_cio_init(dssdev); if (r) goto err2; diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index d2b57197b292..04b4586113e3 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -388,7 +388,8 @@ void dss_select_lcd_clk_source(enum omap_channel channel, dsi_wait_pll_hsdiv_dispc_active(dsidev); break; case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2); + BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && + channel != OMAP_DSS_CHANNEL_LCD3); b = 1; dsidev = dsi_get_dsidev_from_id(1); dsi_wait_pll_hsdiv_dispc_active(dsidev); @@ -398,10 +399,12 @@ void dss_select_lcd_clk_source(enum omap_channel channel, return; } - pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12; + pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ - ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); dss.lcd_clk_source[ix] = clk_src; } @@ -418,7 +421,8 @@ enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { if (dss_has_feature(FEAT_LCD_CLK_SRC)) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 
1 : 2); return dss.lcd_clk_source[ix]; } else { /* LCD_CLK source is the same as DISPC_FCLK source for @@ -502,8 +506,7 @@ unsigned long dss_get_dpll4_rate(void) return 0; } -int dss_calc_clock_div(bool is_tft, unsigned long req_pck, - struct dss_clock_info *dss_cinfo, +int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, struct dispc_clock_info *dispc_cinfo) { unsigned long prate; @@ -551,7 +554,7 @@ retry: fck = clk_get_rate(dss.dss_clk); fck_div = 1; - dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); + dispc_find_clk_divs(req_pck, fck, &cur_dispc); match = 1; best_dss.fck = fck; @@ -581,7 +584,7 @@ retry: match = 1; - dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); + dispc_find_clk_divs(req_pck, fck, &cur_dispc); if (abs(cur_dispc.pck - req_pck) < abs(best_dispc.pck - req_pck)) { diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index dd1092ceaeef..f67afe76f217 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -152,6 +152,25 @@ struct dsi_clock_info { u16 lp_clk_div; }; +struct reg_field { + u16 reg; + u8 high; + u8 low; +}; + +struct dss_lcd_mgr_config { + enum dss_io_pad_mode io_pad_mode; + + bool stallmode; + bool fifohandcheck; + + struct dispc_clock_info clock_info; + + int video_port_width; + + int lcden_sig_polarity; +}; + struct seq_file; struct platform_device; @@ -188,6 +207,8 @@ int dss_mgr_set_device(struct omap_overlay_manager *mgr, int dss_mgr_unset_device(struct omap_overlay_manager *mgr); void dss_mgr_set_timings(struct omap_overlay_manager *mgr, struct omap_video_timings *timings); +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config); const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr); bool dss_ovl_is_enabled(struct omap_overlay *ovl); @@ -210,8 +231,6 @@ void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev); void dss_uninit_device(struct platform_device *pdev, struct omap_dss_device *dssdev); -bool dss_use_replication(struct omap_dss_device *dssdev, - enum omap_color_mode mode); /* manager */ int dss_init_overlay_managers(struct platform_device *pdev); @@ -223,8 +242,18 @@ int dss_mgr_check_timings(struct omap_overlay_manager *mgr, int dss_mgr_check(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info, const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *config, struct omap_overlay_info **overlay_infos); +static inline bool dss_mgr_is_lcd(enum omap_channel id) +{ + if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 || + id == OMAP_DSS_CHANNEL_LCD3) + return true; + else + return false; +} + /* overlay */ void dss_init_overlays(struct platform_device *pdev); void dss_uninit_overlays(struct platform_device *pdev); @@ -234,6 +263,8 @@ int dss_ovl_simple_check(struct omap_overlay *ovl, const struct omap_overlay_info *info); int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, const struct omap_video_timings *mgr_timings); +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode); /* DSS */ int dss_init_platform_driver(void) __init; @@ -268,8 +299,7 @@ unsigned long dss_get_dpll4_rate(void); int dss_calc_clock_rates(struct dss_clock_info *cinfo); int dss_set_clock_div(struct dss_clock_info *cinfo); int dss_get_clock_div(struct dss_clock_info *cinfo); -int dss_calc_clock_div(bool is_tft, unsigned long req_pck, - struct dss_clock_info *dss_cinfo, 
+int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, struct dispc_clock_info *dispc_cinfo); /* SDI */ @@ -296,7 +326,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); int dsi_pll_set_clock_div(struct platform_device *dsidev, struct dsi_clock_info *cinfo); -int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, unsigned long req_pck, struct dsi_clock_info *cinfo, struct dispc_clock_info *dispc_cinfo); int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, @@ -330,7 +360,7 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev, return -ENODEV; } static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, - bool is_tft, unsigned long req_pck, + unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { @@ -387,7 +417,7 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode); bool dispc_mgr_timings_ok(enum omap_channel channel, const struct omap_video_timings *timings); unsigned long dispc_fclk_rate(void); -void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, +void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo); int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, struct dispc_clock_info *cinfo); @@ -398,8 +428,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, bool manual_update); int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication, - const struct omap_video_timings *mgr_timings); + bool replication, const struct omap_video_timings *mgr_timings); int dispc_ovl_enable(enum omap_plane plane, bool enable); void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel); @@ -415,16 +444,13 @@ bool dispc_mgr_is_channel_enabled(enum omap_channel channel); void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode); void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable); void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines); -void dispc_mgr_set_lcd_display_type(enum omap_channel channel, - enum omap_lcd_display_type type); +void dispc_mgr_set_lcd_type_tft(enum omap_channel channel); void dispc_mgr_set_timings(enum omap_channel channel, struct omap_video_timings *timings); -void dispc_mgr_set_pol_freq(enum omap_channel channel, - enum omap_panel_config config, u8 acbi, u8 acb); unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); unsigned long dispc_core_clk_rate(void); -int dispc_mgr_set_clock_div(enum omap_channel channel, +void dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); int dispc_mgr_get_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index bdf469f080e7..996ffcbfed58 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -24,9 +24,9 @@ #include "ti_hdmi.h" #endif -#define MAX_DSS_MANAGERS 3 +#define MAX_DSS_MANAGERS 4 #define MAX_DSS_OVERLAYS 4 -#define MAX_DSS_LCD_MANAGERS 2 +#define MAX_DSS_LCD_MANAGERS 3 #define MAX_NUM_DSI 2 /* DSS has feature id */ @@ -36,6 +36,7 @@ enum dss_feat_id { 
FEAT_PCKFREEENABLE, FEAT_FUNCGATED, FEAT_MGR_LCD2, + FEAT_MGR_LCD3, FEAT_LINEBUFFERSPLIT, FEAT_ROWREPEATENABLE, FEAT_RESIZECONF, diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 26a2430a7028..060216fdc578 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -78,43 +78,214 @@ static struct { */ static const struct hdmi_config cea_timings[] = { -{ {640, 480, 25200, 96, 16, 48, 2, 10, 33, 0, 0, 0}, {1, HDMI_HDMI} }, -{ {720, 480, 27027, 62, 16, 60, 6, 9, 30, 0, 0, 0}, {2, HDMI_HDMI} }, -{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {4, HDMI_HDMI} }, -{ {1920, 540, 74250, 44, 88, 148, 5, 2, 15, 1, 1, 1}, {5, HDMI_HDMI} }, -{ {1440, 240, 27027, 124, 38, 114, 3, 4, 15, 0, 0, 1}, {6, HDMI_HDMI} }, -{ {1920, 1080, 148500, 44, 88, 148, 5, 4, 36, 1, 1, 0}, {16, HDMI_HDMI} }, -{ {720, 576, 27000, 64, 12, 68, 5, 5, 39, 0, 0, 0}, {17, HDMI_HDMI} }, -{ {1280, 720, 74250, 40, 440, 220, 5, 5, 20, 1, 1, 0}, {19, HDMI_HDMI} }, -{ {1920, 540, 74250, 44, 528, 148, 5, 2, 15, 1, 1, 1}, {20, HDMI_HDMI} }, -{ {1440, 288, 27000, 126, 24, 138, 3, 2, 19, 0, 0, 1}, {21, HDMI_HDMI} }, -{ {1440, 576, 54000, 128, 24, 136, 5, 5, 39, 0, 0, 0}, {29, HDMI_HDMI} }, -{ {1920, 1080, 148500, 44, 528, 148, 5, 4, 36, 1, 1, 0}, {31, HDMI_HDMI} }, -{ {1920, 1080, 74250, 44, 638, 148, 5, 4, 36, 1, 1, 0}, {32, HDMI_HDMI} }, -{ {2880, 480, 108108, 248, 64, 240, 6, 9, 30, 0, 0, 0}, {35, HDMI_HDMI} }, -{ {2880, 576, 108000, 256, 48, 272, 5, 5, 39, 0, 0, 0}, {37, HDMI_HDMI} }, + { + { 640, 480, 25200, 96, 16, 48, 2, 10, 33, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 1, HDMI_HDMI }, + }, + { + { 720, 480, 27027, 62, 16, 60, 6, 9, 30, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 2, HDMI_HDMI }, + }, + { + { 1280, 720, 74250, 40, 110, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 4, HDMI_HDMI }, + }, + { + { 1920, 540, 74250, 44, 88, 148, 5, 2, 15, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + true, }, + { 5, HDMI_HDMI }, + }, + { + { 1440, 240, 27027, 124, 38, 114, 3, 4, 15, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + true, }, + { 6, HDMI_HDMI }, + }, + { + { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 16, HDMI_HDMI }, + }, + { + { 720, 576, 27000, 64, 12, 68, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 17, HDMI_HDMI }, + }, + { + { 1280, 720, 74250, 40, 440, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 19, HDMI_HDMI }, + }, + { + { 1920, 540, 74250, 44, 528, 148, 5, 2, 15, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + true, }, + { 20, HDMI_HDMI }, + }, + { + { 1440, 288, 27000, 126, 24, 138, 3, 2, 19, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + true, }, + { 21, HDMI_HDMI }, + }, + { + { 1440, 576, 54000, 128, 24, 136, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 29, HDMI_HDMI }, + }, + { + { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 31, HDMI_HDMI }, + }, + { + { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 32, HDMI_HDMI }, + }, + { + { 2880, 480, 108108, 248, 64, 240, 6, 9, 30, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 35, HDMI_HDMI }, + }, + { + { 2880, 576, 108000, 256, 48, 272, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, 
OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 37, HDMI_HDMI }, + }, }; + static const struct hdmi_config vesa_timings[] = { /* VESA From Here */ -{ {640, 480, 25175, 96, 16, 48, 2 , 11, 31, 0, 0, 0}, {4, HDMI_DVI} }, -{ {800, 600, 40000, 128, 40, 88, 4 , 1, 23, 1, 1, 0}, {9, HDMI_DVI} }, -{ {848, 480, 33750, 112, 16, 112, 8 , 6, 23, 1, 1, 0}, {0xE, HDMI_DVI} }, -{ {1280, 768, 79500, 128, 64, 192, 7 , 3, 20, 1, 0, 0}, {0x17, HDMI_DVI} }, -{ {1280, 800, 83500, 128, 72, 200, 6 , 3, 22, 1, 0, 0}, {0x1C, HDMI_DVI} }, -{ {1360, 768, 85500, 112, 64, 256, 6 , 3, 18, 1, 1, 0}, {0x27, HDMI_DVI} }, -{ {1280, 960, 108000, 112, 96, 312, 3 , 1, 36, 1, 1, 0}, {0x20, HDMI_DVI} }, -{ {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38, 1, 1, 0}, {0x23, HDMI_DVI} }, -{ {1024, 768, 65000, 136, 24, 160, 6, 3, 29, 0, 0, 0}, {0x10, HDMI_DVI} }, -{ {1400, 1050, 121750, 144, 88, 232, 4, 3, 32, 1, 0, 0}, {0x2A, HDMI_DVI} }, -{ {1440, 900, 106500, 152, 80, 232, 6, 3, 25, 1, 0, 0}, {0x2F, HDMI_DVI} }, -{ {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, 1, 0, 0}, {0x3A, HDMI_DVI} }, -{ {1366, 768, 85500, 143, 70, 213, 3, 3, 24, 1, 1, 0}, {0x51, HDMI_DVI} }, -{ {1920, 1080, 148500, 44, 148, 80, 5, 4, 36, 1, 1, 0}, {0x52, HDMI_DVI} }, -{ {1280, 768, 68250, 32, 48, 80, 7, 3, 12, 0, 1, 0}, {0x16, HDMI_DVI} }, -{ {1400, 1050, 101000, 32, 48, 80, 4, 3, 23, 0, 1, 0}, {0x29, HDMI_DVI} }, -{ {1680, 1050, 119000, 32, 48, 80, 6, 3, 21, 0, 1, 0}, {0x39, HDMI_DVI} }, -{ {1280, 800, 79500, 32, 48, 80, 6, 3, 14, 0, 1, 0}, {0x1B, HDMI_DVI} }, -{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {0x55, HDMI_DVI} } + { + { 640, 480, 25175, 96, 16, 48, 2, 11, 31, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 4, HDMI_DVI }, + }, + { + { 800, 600, 40000, 128, 40, 88, 4, 1, 23, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 9, HDMI_DVI }, + }, + { + { 848, 480, 33750, 112, 16, 112, 8, 6, 23, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0xE, HDMI_DVI }, + }, + { + { 1280, 768, 79500, 128, 64, 192, 7, 3, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x17, HDMI_DVI }, + }, + { + { 1280, 800, 83500, 128, 72, 200, 6, 3, 22, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x1C, HDMI_DVI }, + }, + { + { 1360, 768, 85500, 112, 64, 256, 6, 3, 18, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x27, HDMI_DVI }, + }, + { + { 1280, 960, 108000, 112, 96, 312, 3, 1, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x20, HDMI_DVI }, + }, + { + { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x23, HDMI_DVI }, + }, + { + { 1024, 768, 65000, 136, 24, 160, 6, 3, 29, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x10, HDMI_DVI }, + }, + { + { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x2A, HDMI_DVI }, + }, + { + { 1440, 900, 106500, 152, 80, 232, 6, 3, 25, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x2F, HDMI_DVI }, + }, + { + { 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x3A, HDMI_DVI }, + }, + { + { 1366, 768, 85500, 143, 70, 213, 3, 3, 24, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x51, HDMI_DVI }, + }, + { + { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x52, HDMI_DVI }, + }, + { + 
{ 1280, 768, 68250, 32, 48, 80, 7, 3, 12, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x16, HDMI_DVI }, + }, + { + { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x29, HDMI_DVI }, + }, + { + { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x39, HDMI_DVI }, + }, + { + { 1280, 800, 79500, 32, 48, 80, 6, 3, 14, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x1B, HDMI_DVI }, + }, + { + { 1280, 720, 74250, 40, 110, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x55, HDMI_DVI }, + }, }; static int hdmi_runtime_get(void) @@ -179,7 +350,7 @@ static const struct hdmi_config *hdmi_get_timings(void) } static bool hdmi_timings_compare(struct omap_video_timings *timing1, - const struct hdmi_video_timings *timing2) + const struct omap_video_timings *timing2) { int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync; @@ -758,6 +929,7 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev) hdmi.ip_data.core_av_offset = HDMI_CORE_AV; hdmi.ip_data.pll_offset = HDMI_PLLCTRL; hdmi.ip_data.phy_offset = HDMI_PHY; + mutex_init(&hdmi.ip_data.lock); hdmi_panel_init(); @@ -785,7 +957,7 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) static int hdmi_runtime_suspend(struct device *dev) { - clk_disable(hdmi.sys_clk); + clk_disable_unprepare(hdmi.sys_clk); dispc_runtime_put(); @@ -800,7 +972,7 @@ static int hdmi_runtime_resume(struct device *dev) if (r < 0) return r; - clk_enable(hdmi.sys_clk); + clk_prepare_enable(hdmi.sys_clk); return 0; } diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c index 1179e3c4b1c7..e10844faadf9 100644 --- a/drivers/video/omap2/dss/hdmi_panel.c +++ b/drivers/video/omap2/dss/hdmi_panel.c @@ -43,10 +43,11 @@ static int hdmi_panel_probe(struct omap_dss_device *dssdev) { DSSDBG("ENTER hdmi_panel_probe\n"); - dssdev->panel.config = OMAP_DSS_LCD_TFT | - OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS; - - dssdev->panel.timings = (struct omap_video_timings){640, 480, 25175, 96, 16, 48, 2 , 11, 31}; + dssdev->panel.timings = (struct omap_video_timings) + { 640, 480, 25175, 96, 16, 48, 2, 11, 31, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, + }; DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n", dssdev->panel.timings.x_res, diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index 0cbcde4c688a..53710fadc82d 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -500,16 +500,12 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr) if (r) return r; - if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) { + if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) irq = DISPC_IRQ_EVSYNC_ODD; - } else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) { + else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) irq = DISPC_IRQ_EVSYNC_EVEN; - } else { - if (mgr->id == OMAP_DSS_CHANNEL_LCD) - irq = DISPC_IRQ_VSYNC; - else - irq = DISPC_IRQ_VSYNC2; - } + else + irq = dispc_mgr_get_vsync_irq(mgr->id); r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); @@ -545,6 +541,10 @@ int dss_init_overlay_managers(struct platform_device *pdev) mgr->name = "lcd2"; mgr->id = OMAP_DSS_CHANNEL_LCD2; break; + case 3: + mgr->name = "lcd3"; + mgr->id = OMAP_DSS_CHANNEL_LCD3; + break; } mgr->set_device = &dss_mgr_set_device; @@ -665,9 +665,40 @@ int 
dss_mgr_check_timings(struct omap_overlay_manager *mgr, return 0; } +static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct dispc_clock_info cinfo = config->clock_info; + int dl = config->video_port_width; + bool stallmode = config->stallmode; + bool fifohandcheck = config->fifohandcheck; + + if (cinfo.lck_div < 1 || cinfo.lck_div > 255) + return -EINVAL; + + if (cinfo.pck_div < 1 || cinfo.pck_div > 255) + return -EINVAL; + + if (dl != 12 && dl != 16 && dl != 18 && dl != 24) + return -EINVAL; + + /* fifohandcheck should be used only with stallmode */ + if (stallmode == false && fifohandcheck == true) + return -EINVAL; + + /* + * io pad mode can be only checked by using dssdev connected to the + * manager. Ignore checking these for now, add checks when manager + * is capable of holding information related to the connected interface + */ + + return 0; +} + int dss_mgr_check(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info, const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *lcd_config, struct omap_overlay_info **overlay_infos) { struct omap_overlay *ovl; @@ -683,6 +714,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr, if (r) return r; + r = dss_mgr_check_lcd_config(mgr, lcd_config); + if (r) + return r; + list_for_each_entry(ovl, &mgr->overlays, list) { struct omap_overlay_info *oi; int r; diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index b0ba60f88dd2..952c6fad9a81 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -528,14 +528,24 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) struct omap_overlay_manager *lcd_mgr; struct omap_overlay_manager *tv_mgr; struct omap_overlay_manager *lcd2_mgr = NULL; + struct omap_overlay_manager *lcd3_mgr = NULL; struct omap_overlay_manager *mgr = NULL; - lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); - tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); + lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD); + tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_DIGIT); + if (dss_has_feature(FEAT_MGR_LCD3)) + lcd3_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD3); if (dss_has_feature(FEAT_MGR_LCD2)) - lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2); - - if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { + lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD2); + + if (dssdev->channel == OMAP_DSS_CHANNEL_LCD3) { + if (!lcd3_mgr->device || force) { + if (lcd3_mgr->device) + lcd3_mgr->unset_device(lcd3_mgr); + lcd3_mgr->set_device(lcd3_mgr, dssdev); + mgr = lcd3_mgr; + } + } else if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { if (!lcd2_mgr->device || force) { if (lcd2_mgr->device) lcd2_mgr->unset_device(lcd2_mgr); @@ -677,3 +687,16 @@ int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, return 0; } + +/* + * Checks if replication logic should be used. 
Only use when overlay is in + * RGB12U or RGB16 mode, and video port width interface is 18bpp or 24bpp + */ +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode) +{ + if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) + return false; + + return config.video_port_width > 16; +} diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index 7985fa12b9b4..7c087424b634 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -300,10 +300,11 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, } EXPORT_SYMBOL(omap_rfbi_write_pixels); -static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, +static int rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, u16 height, void (*callback)(void *data), void *data) { u32 l; + int r; struct omap_video_timings timings = { .hsw = 1, .hfp = 1, @@ -322,7 +323,9 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, dss_mgr_set_timings(dssdev->manager, &timings); - dispc_mgr_enable(dssdev->manager->id, true); + r = dss_mgr_enable(dssdev->manager); + if (r) + return r; rfbi.framedone_callback = callback; rfbi.framedone_callback_data = data; @@ -335,6 +338,8 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, l = FLD_MOD(l, 1, 4, 4); /* ITE */ rfbi_write_reg(RFBI_CONTROL, l); + + return 0; } static void framedone_callback(void *data, u32 mask) @@ -814,8 +819,11 @@ int omap_rfbi_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h, void (*callback)(void *), void *data) { - rfbi_transfer_area(dssdev, w, h, callback, data); - return 0; + int r; + + r = rfbi_transfer_area(dssdev, w, h, callback, data); + + return r; } EXPORT_SYMBOL(omap_rfbi_update); @@ -859,6 +867,22 @@ static void rfbi_dump_regs(struct seq_file *s) #undef DUMPREG } +static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev) +{ + struct dss_lcd_mgr_config mgr_config; + + mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI; + + mgr_config.stallmode = true; + /* Do we need fifohandcheck for RFBI? 
*/ + mgr_config.fifohandcheck = false; + + mgr_config.video_port_width = dssdev->ctrl.pixel_size; + mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(dssdev->manager, &mgr_config); +} + int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) { int r; @@ -885,13 +909,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) goto err1; } - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); - - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_RFBI); - dispc_mgr_enable_stallmode(dssdev->manager->id, true); - - dispc_mgr_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size); + rfbi_config_lcd_manager(dssdev); rfbi_configure(dssdev->phy.rfbi.channel, dssdev->ctrl.pixel_size, diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 3a43dc2a9b46..5d31699fbd3c 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -32,19 +32,21 @@ static struct { bool update_enabled; struct regulator *vdds_sdi_reg; -} sdi; -static void sdi_basic_init(struct omap_dss_device *dssdev) + struct dss_lcd_mgr_config mgr_config; +} sdi; +static void sdi_config_lcd_manager(struct omap_dss_device *dssdev) { - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS); - dispc_mgr_enable_stallmode(dssdev->manager->id, false); + sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + sdi.mgr_config.stallmode = false; + sdi.mgr_config.fifohandcheck = false; - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); + sdi.mgr_config.video_port_width = 24; + sdi.mgr_config.lcden_sig_polarity = 1; - dispc_mgr_set_tft_data_lines(dssdev->manager->id, 24); - dispc_lcd_enable_signal_polarity(1); + dss_mgr_set_lcd_config(dssdev->manager, &sdi.mgr_config); } int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) @@ -52,8 +54,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) struct omap_video_timings *t = &dssdev->panel.timings; struct dss_clock_info dss_cinfo; struct dispc_clock_info dispc_cinfo; - u16 lck_div, pck_div; - unsigned long fck; unsigned long pck; int r; @@ -76,24 +76,17 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - sdi_basic_init(dssdev); - /* 15.5.9.1.2 */ - dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF; - - dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config, - dssdev->panel.acbi, dssdev->panel.acb); + dssdev->panel.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + dssdev->panel.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; - r = dss_calc_clock_div(1, t->pixel_clock * 1000, - &dss_cinfo, &dispc_cinfo); + r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) goto err_calc_clock_div; - fck = dss_cinfo.fck; - lck_div = dispc_cinfo.lck_div; - pck_div = dispc_cinfo.pck_div; + sdi.mgr_config.clock_info = dispc_cinfo; - pck = fck / lck_div / pck_div / 1000; + pck = dss_cinfo.fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000; if (pck != t->pixel_clock) { DSSWARN("Could not find exact pixel clock. 
Requested %d kHz, " @@ -110,9 +103,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_set_dss_clock_div; - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) - goto err_set_dispc_clock_div; + sdi_config_lcd_manager(dssdev); dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); @@ -129,7 +120,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) err_mgr_enable: dss_sdi_disable(); err_sdi_enable: -err_set_dispc_clock_div: err_set_dss_clock_div: err_calc_clock_div: dispc_runtime_put(); diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h index e734cb444bc7..b046c208cb97 100644 --- a/drivers/video/omap2/dss/ti_hdmi.h +++ b/drivers/video/omap2/dss/ti_hdmi.h @@ -42,30 +42,13 @@ enum hdmi_clk_refsel { HDMI_REFSEL_SYSCLK = 3 }; -/* HDMI timing structure */ -struct hdmi_video_timings { - u16 x_res; - u16 y_res; - /* Unit: KHz */ - u32 pixel_clock; - u16 hsw; - u16 hfp; - u16 hbp; - u16 vsw; - u16 vfp; - u16 vbp; - bool vsync_pol; - bool hsync_pol; - bool interlace; -}; - struct hdmi_cm { int code; int mode; }; struct hdmi_config { - struct hdmi_video_timings timings; + struct omap_video_timings timings; struct hdmi_cm cm; }; @@ -177,7 +160,7 @@ struct hdmi_ip_data { /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */ int hpd_gpio; - bool phy_tx_enabled; + struct mutex lock; }; int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c index 4dae1b291079..c23b85a20cdc 100644 --- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c +++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c @@ -157,6 +157,10 @@ static int hdmi_pll_init(struct hdmi_ip_data *ip_data) /* PHY_PWR_CMD */ static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val) { + /* Return if already the state */ + if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val) + return 0; + /* Command for power control of HDMI PHY */ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6); @@ -231,21 +235,13 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data) static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) { - unsigned long flags; bool hpd; int r; - /* this should be in ti_hdmi_4xxx_ip private data */ - static DEFINE_SPINLOCK(phy_tx_lock); - spin_lock_irqsave(&phy_tx_lock, flags); + mutex_lock(&ip_data->lock); hpd = gpio_get_value(ip_data->hpd_gpio); - if (hpd == ip_data->phy_tx_enabled) { - spin_unlock_irqrestore(&phy_tx_lock, flags); - return 0; - } - if (hpd) r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON); else @@ -257,9 +253,8 @@ static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) goto err; } - ip_data->phy_tx_enabled = hpd; err: - spin_unlock_irqrestore(&phy_tx_lock, flags); + mutex_unlock(&ip_data->lock); return r; } @@ -327,7 +322,6 @@ void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data) free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data); hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); - ip_data->phy_tx_enabled = false; } static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data) @@ -747,11 +741,15 @@ static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data, static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data) { u32 r; + bool vsync_pol, hsync_pol; pr_debug("Enter hdmi_wp_video_config_interface\n"); + vsync_pol = ip_data->cfg.timings.vsync_level == 
OMAPDSS_SIG_ACTIVE_HIGH; + hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH; + r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG); - r = FLD_MOD(r, ip_data->cfg.timings.vsync_pol, 7, 7); - r = FLD_MOD(r, ip_data->cfg.timings.hsync_pol, 6, 6); + r = FLD_MOD(r, vsync_pol, 7, 7); + r = FLD_MOD(r, hsync_pol, 6, 6); r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3); r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r); diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 3907c8b6ecbc..3a220877461a 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -272,6 +272,8 @@ const struct omap_video_timings omap_dss_pal_timings = { .vsw = 5, .vfp = 5, .vbp = 41, + + .interlace = true, }; EXPORT_SYMBOL(omap_dss_pal_timings); @@ -285,6 +287,8 @@ const struct omap_video_timings omap_dss_ntsc_timings = { .vsw = 6, .vfp = 6, .vbp = 31, + + .interlace = true, }; EXPORT_SYMBOL(omap_dss_ntsc_timings); @@ -930,7 +934,7 @@ static int __exit omap_venchw_remove(struct platform_device *pdev) static int venc_runtime_suspend(struct device *dev) { if (venc.tv_dac_clk) - clk_disable(venc.tv_dac_clk); + clk_disable_unprepare(venc.tv_dac_clk); dispc_runtime_put(); @@ -946,7 +950,7 @@ static int venc_runtime_resume(struct device *dev) return r; if (venc.tv_dac_clk) - clk_enable(venc.tv_dac_clk); + clk_prepare_enable(venc.tv_dac_clk); return 0; } diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 3450ea0966c9..08ec1a7103f2 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -733,6 +733,12 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->lower_margin = timings.vfp; var->hsync_len = timings.hsw; var->vsync_len = timings.vsw; + var->sync |= timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH ? + FB_SYNC_HOR_HIGH_ACT : 0; + var->sync |= timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH ? + FB_SYNC_VERT_HIGH_ACT : 0; + var->vmode = timings.interlace ? + FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED; } else { var->pixclock = 0; var->left_margin = 0; @@ -741,12 +747,10 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->lower_margin = 0; var->hsync_len = 0; var->vsync_len = 0; + var->sync = 0; + var->vmode = FB_VMODE_NONINTERLACED; } - /* TODO: get these from panel->config */ - var->vmode = FB_VMODE_NONINTERLACED; - var->sync = 0; - return 0; } @@ -1993,6 +1997,7 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev) } static int omapfb_mode_to_timings(const char *mode_str, + struct omap_dss_device *display, struct omap_video_timings *timings, u8 *bpp) { struct fb_info *fbi; @@ -2046,6 +2051,14 @@ static int omapfb_mode_to_timings(const char *mode_str, goto err; } + if (display->driver->get_timings) { + display->driver->get_timings(display, timings); + } else { + timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + } + timings->pixel_clock = PICOS2KHZ(var->pixclock); timings->hbp = var->left_margin; timings->hfp = var->right_margin; @@ -2055,6 +2068,13 @@ static int omapfb_mode_to_timings(const char *mode_str, timings->vsw = var->vsync_len; timings->x_res = var->xres; timings->y_res = var->yres; + timings->hsync_level = var->sync & FB_SYNC_HOR_HIGH_ACT ? 
+ OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + timings->vsync_level = var->sync & FB_SYNC_VERT_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + timings->interlace = var->vmode & FB_VMODE_INTERLACED; switch (var->bits_per_pixel) { case 16: @@ -2085,7 +2105,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev, struct omap_video_timings timings, temp_timings; struct omapfb_display_data *d; - r = omapfb_mode_to_timings(mode_str, &timings, &bpp); + r = omapfb_mode_to_timings(mode_str, display, &timings, &bpp); if (r) return r; @@ -2178,8 +2198,17 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) } static void fb_videomode_to_omap_timings(struct fb_videomode *m, + struct omap_dss_device *display, struct omap_video_timings *t) { + if (display->driver->get_timings) { + display->driver->get_timings(display, t); + } else { + t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + t->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + } + t->x_res = m->xres; t->y_res = m->yres; t->pixel_clock = PICOS2KHZ(m->pixclock); @@ -2189,6 +2218,13 @@ static void fb_videomode_to_omap_timings(struct fb_videomode *m, t->vsw = m->vsync_len; t->vfp = m->lower_margin; t->vbp = m->upper_margin; + t->hsync_level = m->sync & FB_SYNC_HOR_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + t->vsync_level = m->sync & FB_SYNC_VERT_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + t->interlace = m->vmode & FB_VMODE_INTERLACED; } static int omapfb_find_best_mode(struct omap_dss_device *display, @@ -2231,7 +2267,7 @@ static int omapfb_find_best_mode(struct omap_dss_device *display, if (m->xres == 2880 || m->xres == 1440) continue; - fb_videomode_to_omap_timings(m, &t); + fb_videomode_to_omap_timings(m, display, &t); r = display->driver->check_timings(display, &t); if (r == 0 && best_xres < m->xres) { @@ -2245,7 +2281,8 @@ static int omapfb_find_best_mode(struct omap_dss_device *display, goto err2; } - fb_videomode_to_omap_timings(&specs->modedb[best_idx], timings); + fb_videomode_to_omap_timings(&specs->modedb[best_idx], display, + timings); r = 0; diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 2c80246b18b8..1d007366b917 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -84,7 +84,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", "S3 Virge/GX2", "S3 Virge/GX2+", "", "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X", - "S3 Trio3D"}; + "S3 Trio3D", "S3 Virge/MX"}; #define CHIP_UNKNOWN 0x00 #define CHIP_732_TRIO32 0x01 @@ -105,6 +105,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", #define CHIP_362_TRIO3D_2X 0x11 #define CHIP_368_TRIO3D_2X 0x12 #define CHIP_365_TRIO3D 0x13 +#define CHIP_260_VIRGE_MX 0x14 #define CHIP_XXX_TRIO 0x80 #define CHIP_XXX_TRIO64V2_DXGX 0x81 @@ -280,7 +281,8 @@ static int __devinit s3fb_setup_ddc_bus(struct fb_info *info) */ /* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */ if (par->chip == CHIP_357_VIRGE_GX2 || - par->chip == CHIP_359_VIRGE_GX2P) + par->chip == CHIP_359_VIRGE_GX2P || + par->chip == CHIP_260_VIRGE_MX) svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03); else svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03); @@ -487,7 +489,8 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X 
|| - par->chip == CHIP_368_TRIO3D_2X) { + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) { vga_wseq(par->state.vgabase, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */ vga_wseq(par->state.vgabase, 0x29, r >> 2); /* remaining highest bit of r */ } else @@ -690,7 +693,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) { + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) { vga_wcrt(par->state.vgabase, 0x54, 0x18); /* M parameter */ vga_wcrt(par->state.vgabase, 0x60, 0xff); /* N parameter */ vga_wcrt(par->state.vgabase, 0x61, 0xff); /* L parameter */ @@ -739,7 +743,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_368_TRIO3D_2X || par->chip == CHIP_365_TRIO3D || par->chip == CHIP_375_VIRGE_DX || - par->chip == CHIP_385_VIRGE_GX) { + par->chip == CHIP_385_VIRGE_GX || + par->chip == CHIP_260_VIRGE_MX) { dbytes = info->var.xres * ((bpp+7)/8); vga_wcrt(par->state.vgabase, 0x91, (dbytes + 7) / 8); vga_wcrt(par->state.vgabase, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80); @@ -751,7 +756,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X || - par->chip == CHIP_368_TRIO3D_2X) + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) vga_wcrt(par->state.vgabase, 0x34, 0x00); else /* enable Data Transfer Position Control (DTPC) */ vga_wcrt(par->state.vgabase, 0x34, 0x10); @@ -807,7 +813,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X || - par->chip == CHIP_368_TRIO3D_2X) + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0); else { svga_wcrt_mask(par->state.vgabase, 0x67, 0x10, 0xF0); @@ -837,7 +844,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) hmul = 2; } break; @@ -864,7 +872,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) hmul = 2; } break; @@ -1208,7 +1217,8 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i break; } } else if (par->chip == CHIP_357_VIRGE_GX2 || - par->chip == CHIP_359_VIRGE_GX2P) { + par->chip == CHIP_359_VIRGE_GX2P || + par->chip == CHIP_260_VIRGE_MX) { switch ((regval & 0xC0) >> 6) { case 1: /* 4MB */ info->screen_size = 4 << 20; @@ -1515,6 +1525,7 @@ static struct pci_device_id s3_devices[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8904), .driver_data = CHIP_365_TRIO3D}, + {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8C01), .driver_data = CHIP_260_VIRGE_MX}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c index 4c6b84488561..3951fdae5f68 100644 --- a/drivers/video/sh_mipi_dsi.c +++ b/drivers/video/sh_mipi_dsi.c @@ -127,8 +127,7 @@ static 
void sh_mipi_shutdown(struct platform_device *pdev) sh_mipi_dsi_enable(mipi, false); } -static int __init sh_mipi_setup(struct sh_mipi *mipi, - struct sh_mipi_dsi_info *pdata) +static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata) { void __iomem *base = mipi->base; struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan; @@ -551,7 +550,7 @@ efindslot: return ret; } -static int __exit sh_mipi_remove(struct platform_device *pdev) +static int __devexit sh_mipi_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -592,7 +591,7 @@ static int __exit sh_mipi_remove(struct platform_device *pdev) } static struct platform_driver sh_mipi_driver = { - .remove = __exit_p(sh_mipi_remove), + .remove = __devexit_p(sh_mipi_remove), .shutdown = sh_mipi_shutdown, .driver = { .name = "sh-mipi-dsi", diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index e672698bd820..699487c287b2 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -12,6 +12,7 @@ #include <linux/backlight.h> #include <linux/clk.h> #include <linux/console.h> +#include <linux/ctype.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/gpio.h> @@ -32,12 +33,176 @@ #include "sh_mobile_lcdcfb.h" +/* ---------------------------------------------------------------------------- + * Overlay register definitions + */ + +#define LDBCR 0xb00 +#define LDBCR_UPC(n) (1 << ((n) + 16)) +#define LDBCR_UPF(n) (1 << ((n) + 8)) +#define LDBCR_UPD(n) (1 << ((n) + 0)) +#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00) +#define LDBBSIFR_EN (1 << 31) +#define LDBBSIFR_VS (1 << 29) +#define LDBBSIFR_BRSEL (1 << 28) +#define LDBBSIFR_MX (1 << 27) +#define LDBBSIFR_MY (1 << 26) +#define LDBBSIFR_CV3 (3 << 24) +#define LDBBSIFR_CV2 (2 << 24) +#define LDBBSIFR_CV1 (1 << 24) +#define LDBBSIFR_CV0 (0 << 24) +#define LDBBSIFR_CV_MASK (3 << 24) +#define LDBBSIFR_LAY_MASK (0xff << 16) +#define LDBBSIFR_LAY_SHIFT 16 +#define LDBBSIFR_ROP3_MASK (0xff << 16) +#define LDBBSIFR_ROP3_SHIFT 16 +#define LDBBSIFR_AL_PL8 (3 << 14) +#define LDBBSIFR_AL_PL1 (2 << 14) +#define LDBBSIFR_AL_PK (1 << 14) +#define LDBBSIFR_AL_1 (0 << 14) +#define LDBBSIFR_AL_MASK (3 << 14) +#define LDBBSIFR_SWPL (1 << 10) +#define LDBBSIFR_SWPW (1 << 9) +#define LDBBSIFR_SWPB (1 << 8) +#define LDBBSIFR_RY (1 << 7) +#define LDBBSIFR_CHRR_420 (2 << 0) +#define LDBBSIFR_CHRR_422 (1 << 0) +#define LDBBSIFR_CHRR_444 (0 << 0) +#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0) +#define LDBBSIFR_RPKF_RGB16 (0x03 << 0) +#define LDBBSIFR_RPKF_RGB24 (0x0b << 0) +#define LDBBSIFR_RPKF_MASK (0x1f << 0) +#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04) +#define LDBBSSZR_BVSS_MASK (0xfff << 16) +#define LDBBSSZR_BVSS_SHIFT 16 +#define LDBBSSZR_BHSS_MASK (0xfff << 0) +#define LDBBSSZR_BHSS_SHIFT 0 +#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08) +#define LDBBLOCR_CVLC_MASK (0xfff << 16) +#define LDBBLOCR_CVLC_SHIFT 16 +#define LDBBLOCR_CHLC_MASK (0xfff << 0) +#define LDBBLOCR_CHLC_SHIFT 0 +#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c) +#define LDBBSMWR_BSMWA_MASK (0xffff << 16) +#define LDBBSMWR_BSMWA_SHIFT 16 +#define LDBBSMWR_BSMW_MASK (0xffff << 0) +#define LDBBSMWR_BSMW_SHIFT 0 +#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10) +#define LDBBSAYR_FG1A_MASK (0xff << 24) +#define LDBBSAYR_FG1A_SHIFT 24 +#define LDBBSAYR_FG1R_MASK (0xff << 16) +#define LDBBSAYR_FG1R_SHIFT 16 +#define 
LDBBSAYR_FG1G_MASK (0xff << 8) +#define LDBBSAYR_FG1G_SHIFT 8 +#define LDBBSAYR_FG1B_MASK (0xff << 0) +#define LDBBSAYR_FG1B_SHIFT 0 +#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14) +#define LDBBSACR_FG2A_MASK (0xff << 24) +#define LDBBSACR_FG2A_SHIFT 24 +#define LDBBSACR_FG2R_MASK (0xff << 16) +#define LDBBSACR_FG2R_SHIFT 16 +#define LDBBSACR_FG2G_MASK (0xff << 8) +#define LDBBSACR_FG2G_SHIFT 8 +#define LDBBSACR_FG2B_MASK (0xff << 0) +#define LDBBSACR_FG2B_SHIFT 0 +#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18) +#define LDBBSAAR_AP_MASK (0xff << 24) +#define LDBBSAAR_AP_SHIFT 24 +#define LDBBSAAR_R_MASK (0xff << 16) +#define LDBBSAAR_R_SHIFT 16 +#define LDBBSAAR_GY_MASK (0xff << 8) +#define LDBBSAAR_GY_SHIFT 8 +#define LDBBSAAR_B_MASK (0xff << 0) +#define LDBBSAAR_B_SHIFT 0 +#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c) +#define LDBBPPCR_AP_MASK (0xff << 24) +#define LDBBPPCR_AP_SHIFT 24 +#define LDBBPPCR_R_MASK (0xff << 16) +#define LDBBPPCR_R_SHIFT 16 +#define LDBBPPCR_GY_MASK (0xff << 8) +#define LDBBPPCR_GY_SHIFT 8 +#define LDBBPPCR_B_MASK (0xff << 0) +#define LDBBPPCR_B_SHIFT 0 +#define LDBnBBGCL(n) (0xb10 + (n) * 0x04) +#define LDBBBGCL_BGA_MASK (0xff << 24) +#define LDBBBGCL_BGA_SHIFT 24 +#define LDBBBGCL_BGR_MASK (0xff << 16) +#define LDBBBGCL_BGR_SHIFT 16 +#define LDBBBGCL_BGG_MASK (0xff << 8) +#define LDBBBGCL_BGG_SHIFT 8 +#define LDBBBGCL_BGB_MASK (0xff << 0) +#define LDBBBGCL_BGB_SHIFT 0 + #define SIDE_B_OFFSET 0x1000 #define MIRROR_OFFSET 0x2000 #define MAX_XRES 1920 #define MAX_YRES 1080 +enum sh_mobile_lcdc_overlay_mode { + LCDC_OVERLAY_BLEND, + LCDC_OVERLAY_ROP3, +}; + +/* + * struct sh_mobile_lcdc_overlay - LCDC display overlay + * + * @channel: LCDC channel this overlay belongs to + * @cfg: Overlay configuration + * @info: Frame buffer device + * @index: Overlay index (0-3) + * @base: Overlay registers base address + * @enabled: True if the overlay is enabled + * @mode: Overlay blending mode (alpha blend or ROP3) + * @alpha: Global alpha blending value (0-255, for alpha blending mode) + * @rop3: Raster operation (for ROP3 mode) + * @fb_mem: Frame buffer virtual memory address + * @fb_size: Frame buffer size in bytes + * @dma_handle: Frame buffer DMA address + * @base_addr_y: Overlay base address (RGB or luma component) + * @base_addr_c: Overlay base address (chroma component) + * @pan_y_offset: Panning linear offset in bytes (luma component) + * @format: Current pixel format + * @xres: Horizontal visible resolution + * @xres_virtual: Horizontal total resolution + * @yres: Vertical visible resolution + * @yres_virtual: Vertical total resolution + * @pitch: Overlay line pitch + * @pos_x: Horizontal overlay position + * @pos_y: Vertical overlay position + */ +struct sh_mobile_lcdc_overlay { + struct sh_mobile_lcdc_chan *channel; + + const struct sh_mobile_lcdc_overlay_cfg *cfg; + struct fb_info *info; + + unsigned int index; + unsigned long base; + + bool enabled; + enum sh_mobile_lcdc_overlay_mode mode; + unsigned int alpha; + unsigned int rop3; + + void *fb_mem; + unsigned long fb_size; + + dma_addr_t dma_handle; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long pan_y_offset; + + const struct sh_mobile_lcdc_format_info *format; + unsigned int xres; + unsigned int xres_virtual; + unsigned int yres; + unsigned int yres_virtual; + unsigned int pitch; + int pos_x; + int pos_y; +}; + struct sh_mobile_lcdc_priv { void __iomem *base; int irq; @@ -45,7 +210,10 @@ struct sh_mobile_lcdc_priv { struct device *dev; struct clk *dot_clk; unsigned long 
lddckr; + struct sh_mobile_lcdc_chan ch[2]; + struct sh_mobile_lcdc_overlay overlays[4]; + struct notifier_block notifier; int started; int forced_fourcc; /* 2 channel LCDC must share fourcc setting */ @@ -141,6 +309,13 @@ static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan, return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]); } +static void lcdc_write_overlay(struct sh_mobile_lcdc_overlay *ovl, + int reg, unsigned long data) +{ + iowrite32(data, ovl->channel->lcdc->base + reg); + iowrite32(data, ovl->channel->lcdc->base + reg + SIDE_B_OFFSET); +} + static void lcdc_write(struct sh_mobile_lcdc_priv *priv, unsigned long reg_offs, unsigned long data) { @@ -384,8 +559,8 @@ sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch, return true; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, - struct fb_info *info); +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, enum sh_mobile_lcdc_entity_event event, @@ -439,7 +614,7 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, fb_videomode_to_var(&var, mode); var.bits_per_pixel = info->var.bits_per_pixel; var.grayscale = info->var.grayscale; - ret = sh_mobile_check_var(&var, info); + ret = sh_mobile_lcdc_check_var(&var, info); break; } @@ -585,7 +760,7 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) return IRQ_HANDLED; } -static int sh_mobile_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) +static int sh_mobile_lcdc_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) { unsigned long ldintr; int ret; @@ -685,8 +860,98 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) lcdc_write_chan(ch, LDHAJR, tmp); } +static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) +{ + u32 format = 0; + + if (!ovl->enabled) { + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), 0); + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + return; + } + + ovl->base_addr_y = ovl->dma_handle; + ovl->base_addr_c = ovl->dma_handle + + ovl->xres_virtual * ovl->yres_virtual; + + switch (ovl->mode) { + case LCDC_OVERLAY_BLEND: + format = LDBBSIFR_EN | (ovl->alpha << LDBBSIFR_LAY_SHIFT); + break; + + case LCDC_OVERLAY_ROP3: + format = LDBBSIFR_EN | LDBBSIFR_BRSEL + | (ovl->rop3 << LDBBSIFR_ROP3_SHIFT); + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV61: + case V4L2_PIX_FMT_NV42: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW; + break; + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV24: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB; + break; + case V4L2_PIX_FMT_BGR32: + default: + format |= LDBBSIFR_SWPL; + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16; + break; + case V4L2_PIX_FMT_BGR24: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24; + break; + case V4L2_PIX_FMT_BGR32: + format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32; + break; + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422; + break; + case V4L2_PIX_FMT_NV24: + case V4L2_PIX_FMT_NV42: + format |= 
LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444; + break; + } + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), format); + + lcdc_write_overlay(ovl, LDBnBSSZR(ovl->index), + (ovl->yres << LDBBSSZR_BVSS_SHIFT) | + (ovl->xres << LDBBSSZR_BHSS_SHIFT)); + lcdc_write_overlay(ovl, LDBnBLOCR(ovl->index), + (ovl->pos_y << LDBBLOCR_CVLC_SHIFT) | + (ovl->pos_x << LDBBLOCR_CHLC_SHIFT)); + lcdc_write_overlay(ovl, LDBnBSMWR(ovl->index), + ovl->pitch << LDBBSMWR_BSMW_SHIFT); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); +} + /* - * __sh_mobile_lcdc_start - Configure and tart the LCDC + * __sh_mobile_lcdc_start - Configure and start the LCDC * @priv: LCDC device * * Configure all enabled channels and start the LCDC device. All external @@ -839,27 +1104,25 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* Compute frame buffer base address and pitch for each channel. */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { int pixelformat; - void *meram; + void *cache; ch = &priv->ch[k]; if (!ch->enabled) continue; ch->base_addr_y = ch->dma_handle; - ch->base_addr_c = ch->base_addr_y + ch->xres * ch->yres_virtual; + ch->base_addr_c = ch->dma_handle + + ch->xres_virtual * ch->yres_virtual; ch->line_size = ch->pitch; /* Enable MERAM if possible. */ - if (mdev == NULL || mdev->ops == NULL || - ch->cfg->meram_cfg == NULL) + if (mdev == NULL || ch->cfg->meram_cfg == NULL) continue; - /* we need to de-init configured ICBs before we can - * re-initialize them. - */ - if (ch->meram) { - mdev->ops->meram_unregister(mdev, ch->meram); - ch->meram = NULL; + /* Free the allocated MERAM cache. */ + if (ch->cache) { + sh_mobile_meram_cache_free(mdev, ch->cache); + ch->cache = NULL; } switch (ch->format->fourcc) { @@ -881,17 +1144,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) break; } - meram = mdev->ops->meram_register(mdev, ch->cfg->meram_cfg, + cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg, ch->pitch, ch->yres, pixelformat, &ch->line_size); - if (!IS_ERR(meram)) { - mdev->ops->meram_update(mdev, meram, + if (!IS_ERR(cache)) { + sh_mobile_meram_cache_update(mdev, cache, ch->base_addr_y, ch->base_addr_c, &ch->base_addr_y, &ch->base_addr_c); - ch->meram = meram; + ch->cache = cache; } } + for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[k]; + sh_mobile_lcdc_overlay_setup(ovl); + } + /* Start the LCDC. */ __sh_mobile_lcdc_start(priv); @@ -953,12 +1221,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_display_off(ch); - /* disable the meram */ - if (ch->meram) { - struct sh_mobile_meram_info *mdev; - mdev = priv->meram_dev; - mdev->ops->meram_unregister(mdev, ch->meram); - ch->meram = 0; + /* Free the MERAM cache. */ + if (ch->cache) { + sh_mobile_meram_cache_free(priv->meram_dev, ch->cache); + ch->cache = 0; } } @@ -975,8 +1241,511 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_clk_off(priv); } +static int __sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + if (var->xres > MAX_XRES || var->yres > MAX_YRES) + return -EINVAL; + + /* Make sure the virtual resolution is at least as big as the visible + * resolution. 
+ */ + if (var->xres_virtual < var->xres) + var->xres_virtual = var->xres; + if (var->yres_virtual < var->yres) + var->yres_virtual = var->yres; + + if (sh_mobile_format_is_fourcc(var)) { + const struct sh_mobile_lcdc_format_info *format; + + format = sh_mobile_format_info(var->grayscale); + if (format == NULL) + return -EINVAL; + var->bits_per_pixel = format->bpp; + + /* Default to RGB and JPEG color-spaces for RGB and YUV formats + * respectively. + */ + if (!format->yuv) + var->colorspace = V4L2_COLORSPACE_SRGB; + else if (var->colorspace != V4L2_COLORSPACE_REC709) + var->colorspace = V4L2_COLORSPACE_JPEG; + } else { + if (var->bits_per_pixel <= 16) { /* RGB 565 */ + var->bits_per_pixel = 16; + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ + var->bits_per_pixel = 24; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ + var->bits_per_pixel = 32; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + } else + return -EINVAL; + + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + } + + /* Make sure we don't exceed our allocated memory. */ + if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > + info->fix.smem_len) + return -EINVAL; + + return 0; +} + /* ----------------------------------------------------------------------------- - * Frame buffer operations + * Frame buffer operations - Overlays + */ + +static ssize_t +overlay_alpha_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->alpha); +} + +static ssize_t +overlay_alpha_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int alpha; + char *endp; + + alpha = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (alpha > 255) + return -EINVAL; + + if (ovl->alpha != alpha) { + ovl->alpha = alpha; + + if (ovl->mode == LCDC_OVERLAY_BLEND && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->mode); +} + +static ssize_t +overlay_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int mode; + char *endp; + + mode = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (mode != LCDC_OVERLAY_BLEND && mode != LCDC_OVERLAY_ROP3) + return -EINVAL; + + if (ovl->mode != mode) { 
+ ovl->mode = mode; + + if (ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_position_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->pos_x, ovl->pos_y); +} + +static ssize_t +overlay_position_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + char *endp; + int pos_x; + int pos_y; + + pos_x = simple_strtol(buf, &endp, 10); + if (*endp != ',') + return -EINVAL; + + pos_y = simple_strtol(endp + 1, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (ovl->pos_x != pos_x || ovl->pos_y != pos_y) { + ovl->pos_x = pos_x; + ovl->pos_y = pos_y; + + if (ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_rop3_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->rop3); +} + +static ssize_t +overlay_rop3_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int rop3; + char *endp; + + rop3 = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (rop3 > 255) + return -EINVAL; + + if (ovl->rop3 != rop3) { + ovl->rop3 = rop3; + + if (ovl->mode == LCDC_OVERLAY_ROP3 && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static const struct device_attribute overlay_sysfs_attrs[] = { + __ATTR(ovl_alpha, S_IRUGO|S_IWUSR, + overlay_alpha_show, overlay_alpha_store), + __ATTR(ovl_mode, S_IRUGO|S_IWUSR, + overlay_mode_show, overlay_mode_store), + __ATTR(ovl_position, S_IRUGO|S_IWUSR, + overlay_position_show, overlay_position_store), + __ATTR(ovl_rop3, S_IRUGO|S_IWUSR, + overlay_rop3_show, overlay_rop3_store), +}; + +static const struct fb_fix_screeninfo sh_mobile_lcdc_overlay_fix = { + .id = "SH Mobile LCDC", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, + .xpanstep = 1, + .ypanstep = 1, + .ywrapstep = 0, + .capabilities = FB_CAP_FOURCC, +}; + +static int sh_mobile_lcdc_overlay_pan(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long y_offset; + unsigned long c_offset; + + if (!ovl->format->yuv) { + y_offset = (var->yoffset * ovl->xres_virtual + var->xoffset) + * ovl->format->bpp / 8; + c_offset = 0; + } else { + unsigned int xsub = ovl->format->bpp < 24 ? 2 : 1; + unsigned int ysub = ovl->format->bpp < 16 ? 2 : 1; + + y_offset = var->yoffset * ovl->xres_virtual + var->xoffset; + c_offset = var->yoffset / ysub * ovl->xres_virtual * 2 / xsub + + var->xoffset * 2 / xsub; + } + + /* If the Y offset hasn't changed, the C offset hasn't either. There's + * nothing to do in that case. 
+ */ + if (y_offset == ovl->pan_y_offset) + return 0; + + /* Set the source address for the next refresh */ + base_addr_y = ovl->dma_handle + y_offset; + base_addr_c = ovl->dma_handle + ovl->xres_virtual * ovl->yres_virtual + + c_offset; + + ovl->base_addr_y = base_addr_y; + ovl->base_addr_c = base_addr_c; + ovl->pan_y_offset = y_offset; + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + + return 0; +} + +static int sh_mobile_lcdc_overlay_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + switch (cmd) { + case FBIO_WAITFORVSYNC: + return sh_mobile_lcdc_wait_for_vsync(ovl->channel); + + default: + return -ENOIOCTLCMD; + } +} + +static int sh_mobile_lcdc_overlay_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return __sh_mobile_lcdc_check_var(var, info); +} + +static int sh_mobile_lcdc_overlay_set_par(struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->format = + sh_mobile_format_info(sh_mobile_format_fourcc(&info->var)); + + ovl->xres = info->var.xres; + ovl->xres_virtual = info->var.xres_virtual; + ovl->yres = info->var.yres; + ovl->yres_virtual = info->var.yres_virtual; + + if (ovl->format->yuv) + ovl->pitch = info->var.xres_virtual; + else + ovl->pitch = info->var.xres_virtual * ovl->format->bpp / 8; + + sh_mobile_lcdc_overlay_setup(ovl); + + info->fix.line_length = ovl->pitch; + + if (sh_mobile_format_is_fourcc(&info->var)) { + info->fix.type = FB_TYPE_FOURCC; + info->fix.visual = FB_VISUAL_FOURCC; + } else { + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + } + + return 0; +} + +/* Overlay blanking. Disable the overlay when blanked. */ +static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->enabled = !blank; + sh_mobile_lcdc_overlay_setup(ovl); + + /* Prevent the backlight from receiving a blanking event by returning + * a non-zero value. 
+ */ + return 1; +} + +static struct fb_ops sh_mobile_lcdc_overlay_ops = { + .owner = THIS_MODULE, + .fb_read = fb_sys_read, + .fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_blank = sh_mobile_lcdc_overlay_blank, + .fb_pan_display = sh_mobile_lcdc_overlay_pan, + .fb_ioctl = sh_mobile_lcdc_overlay_ioctl, + .fb_check_var = sh_mobile_lcdc_overlay_check_var, + .fb_set_par = sh_mobile_lcdc_overlay_set_par, +}; + +static void +sh_mobile_lcdc_overlay_fb_unregister(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->dev == NULL) + return; + + unregister_framebuffer(ovl->info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc; + struct fb_info *info = ovl->info; + unsigned int i; + int ret; + + if (info == NULL) + return 0; + + ret = register_framebuffer(info); + if (ret < 0) + return ret; + + dev_info(lcdc->dev, "registered %s/overlay %u as %dx%d %dbpp.\n", + dev_name(lcdc->dev), ovl->index, info->var.xres, + info->var.yres, info->var.bits_per_pixel); + + for (i = 0; i < ARRAY_SIZE(overlay_sysfs_attrs); ++i) { + ret = device_create_file(info->dev, &overlay_sysfs_attrs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static void +sh_mobile_lcdc_overlay_fb_cleanup(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->device == NULL) + return; + + framebuffer_release(info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *priv = ovl->channel->lcdc; + struct fb_var_screeninfo *var; + struct fb_info *info; + + /* Allocate and initialize the frame buffer device. */ + info = framebuffer_alloc(0, priv->dev); + if (info == NULL) { + dev_err(priv->dev, "unable to allocate fb_info\n"); + return -ENOMEM; + } + + ovl->info = info; + + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &sh_mobile_lcdc_overlay_ops; + info->device = priv->dev; + info->screen_base = ovl->fb_mem; + info->par = ovl; + + /* Initialize fixed screen information. Restrict pan to 2 lines steps + * for NV12 and NV21. + */ + info->fix = sh_mobile_lcdc_overlay_fix; + snprintf(info->fix.id, sizeof(info->fix.id), + "SH Mobile LCDC Overlay %u", ovl->index); + info->fix.smem_start = ovl->dma_handle; + info->fix.smem_len = ovl->fb_size; + info->fix.line_length = ovl->pitch; + + if (ovl->format->yuv) + info->fix.visual = FB_VISUAL_FOURCC; + else + info->fix.visual = FB_VISUAL_TRUECOLOR; + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + info->fix.ypanstep = 2; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + info->fix.xpanstep = 2; + } + + /* Initialize variable screen information. */ + var = &info->var; + memset(var, 0, sizeof(*var)); + var->xres = ovl->xres; + var->yres = ovl->yres; + var->xres_virtual = ovl->xres_virtual; + var->yres_virtual = ovl->yres_virtual; + var->activate = FB_ACTIVATE_NOW; + + /* Use the legacy API by default for RGB formats, and the FOURCC API + * for YUV formats. 
+ */ + if (!ovl->format->yuv) + var->bits_per_pixel = ovl->format->bpp; + else + var->grayscale = ovl->format->fourcc; + + return sh_mobile_lcdc_overlay_check_var(var, info); +} + +/* ----------------------------------------------------------------------------- + * Frame buffer operations - main frame buffer */ static int sh_mobile_lcdc_setcolreg(u_int regno, @@ -1003,12 +1772,12 @@ static int sh_mobile_lcdc_setcolreg(u_int regno, return 0; } -static struct fb_fix_screeninfo sh_mobile_lcdc_fix = { +static const struct fb_fix_screeninfo sh_mobile_lcdc_fix = { .id = "SH Mobile LCDC", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .accel = FB_ACCEL_NONE, - .xpanstep = 0, + .xpanstep = 1, .ypanstep = 1, .ywrapstep = 0, .capabilities = FB_CAP_FOURCC, @@ -1035,78 +1804,74 @@ static void sh_mobile_lcdc_imageblit(struct fb_info *info, sh_mobile_lcdc_deferred_io_touch(info); } -static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) +static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *priv = ch->lcdc; unsigned long ldrcntr; - unsigned long new_pan_offset; unsigned long base_addr_y, base_addr_c; + unsigned long y_offset; unsigned long c_offset; - if (!ch->format->yuv) - new_pan_offset = var->yoffset * ch->pitch - + var->xoffset * (ch->format->bpp / 8); - else - new_pan_offset = var->yoffset * ch->pitch + var->xoffset; + if (!ch->format->yuv) { + y_offset = (var->yoffset * ch->xres_virtual + var->xoffset) + * ch->format->bpp / 8; + c_offset = 0; + } else { + unsigned int xsub = ch->format->bpp < 24 ? 2 : 1; + unsigned int ysub = ch->format->bpp < 16 ? 2 : 1; - if (new_pan_offset == ch->pan_offset) - return 0; /* No change, do nothing */ + y_offset = var->yoffset * ch->xres_virtual + var->xoffset; + c_offset = var->yoffset / ysub * ch->xres_virtual * 2 / xsub + + var->xoffset * 2 / xsub; + } - ldrcntr = lcdc_read(priv, _LDRCNTR); + /* If the Y offset hasn't changed, the C offset hasn't either. There's + * nothing to do in that case. 
+ */ + if (y_offset == ch->pan_y_offset) + return 0; /* Set the source address for the next refresh */ - base_addr_y = ch->dma_handle + new_pan_offset; - if (ch->format->yuv) { - /* Set y offset */ - c_offset = var->yoffset * ch->pitch - * (ch->format->bpp - 8) / 8; - base_addr_c = ch->dma_handle + ch->xres * ch->yres_virtual - + c_offset; - /* Set x offset */ - if (ch->format->fourcc == V4L2_PIX_FMT_NV24) - base_addr_c += 2 * var->xoffset; - else - base_addr_c += var->xoffset; - } + base_addr_y = ch->dma_handle + y_offset; + base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual + + c_offset; - if (ch->meram) { - struct sh_mobile_meram_info *mdev; - - mdev = priv->meram_dev; - mdev->ops->meram_update(mdev, ch->meram, - base_addr_y, base_addr_c, - &base_addr_y, &base_addr_c); - } + if (ch->cache) + sh_mobile_meram_cache_update(priv->meram_dev, ch->cache, + base_addr_y, base_addr_c, + &base_addr_y, &base_addr_c); ch->base_addr_y = base_addr_y; ch->base_addr_c = base_addr_c; + ch->pan_y_offset = y_offset; lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); if (ch->format->yuv) lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); + ldrcntr = lcdc_read(priv, _LDRCNTR); if (lcdc_chan_is_sublcd(ch)) lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); else lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS); - ch->pan_offset = new_pan_offset; sh_mobile_lcdc_deferred_io_touch(info); return 0; } -static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) +static int sh_mobile_lcdc_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) { + struct sh_mobile_lcdc_chan *ch = info->par; int retval; switch (cmd) { case FBIO_WAITFORVSYNC: - retval = sh_mobile_wait_for_vsync(info->par); + retval = sh_mobile_lcdc_wait_for_vsync(ch); break; default: @@ -1158,7 +1923,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) * Locking: both .fb_release() and .fb_open() are called with info->lock held if * user == 1, or with console sem held, if user == 0. */ -static int sh_mobile_release(struct fb_info *info, int user) +static int sh_mobile_lcdc_release(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1179,7 +1944,7 @@ static int sh_mobile_release(struct fb_info *info, int user) return 0; } -static int sh_mobile_open(struct fb_info *info, int user) +static int sh_mobile_lcdc_open(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1192,7 +1957,8 @@ static int sh_mobile_open(struct fb_info *info, int user) return 0; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *p = ch->lcdc; @@ -1200,9 +1966,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in unsigned int best_xres = 0; unsigned int best_yres = 0; unsigned int i; - - if (var->xres > MAX_XRES || var->yres > MAX_YRES) - return -EINVAL; + int ret; /* If board code provides us with a list of available modes, make sure * we use one of them. Find the mode closest to the requested one. The @@ -1237,73 +2001,9 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in var->yres = best_yres; } - /* Make sure the virtual resolution is at least as big as the visible - * resolution. 
- */ - if (var->xres_virtual < var->xres) - var->xres_virtual = var->xres; - if (var->yres_virtual < var->yres) - var->yres_virtual = var->yres; - - if (sh_mobile_format_is_fourcc(var)) { - const struct sh_mobile_lcdc_format_info *format; - - format = sh_mobile_format_info(var->grayscale); - if (format == NULL) - return -EINVAL; - var->bits_per_pixel = format->bpp; - - /* Default to RGB and JPEG color-spaces for RGB and YUV formats - * respectively. - */ - if (!format->yuv) - var->colorspace = V4L2_COLORSPACE_SRGB; - else if (var->colorspace != V4L2_COLORSPACE_REC709) - var->colorspace = V4L2_COLORSPACE_JPEG; - } else { - if (var->bits_per_pixel <= 16) { /* RGB 565 */ - var->bits_per_pixel = 16; - var->red.offset = 11; - var->red.length = 5; - var->green.offset = 5; - var->green.length = 6; - var->blue.offset = 0; - var->blue.length = 5; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ - var->bits_per_pixel = 24; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ - var->bits_per_pixel = 32; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 24; - var->transp.length = 8; - } else - return -EINVAL; - - var->red.msb_right = 0; - var->green.msb_right = 0; - var->blue.msb_right = 0; - var->transp.msb_right = 0; - } - - /* Make sure we don't exceed our allocated memory. */ - if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > - info->fix.smem_len) - return -EINVAL; + ret = __sh_mobile_lcdc_check_var(var, info); + if (ret < 0) + return ret; /* only accept the forced_fourcc for dual channel configurations */ if (p->forced_fourcc && @@ -1313,7 +2013,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in return 0; } -static int sh_mobile_set_par(struct fb_info *info) +static int sh_mobile_lcdc_set_par(struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; int ret; @@ -1329,9 +2029,9 @@ static int sh_mobile_set_par(struct fb_info *info) ch->yres_virtual = info->var.yres_virtual; if (ch->format->yuv) - ch->pitch = info->var.xres; + ch->pitch = info->var.xres_virtual; else - ch->pitch = info->var.xres * ch->format->bpp / 8; + ch->pitch = info->var.xres_virtual * ch->format->bpp / 8; ret = sh_mobile_lcdc_start(ch->lcdc); if (ret < 0) @@ -1383,8 +2083,8 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info) * mode will reenable the clocks and update the screen in time, * so it does not need this. 
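A quick sketch of the pitch rule that sh_mobile_lcdc_set_par() applies above, now based on the virtual rather than the visible width; the helper below is an illustrative rewrite, not code from the patch.

/* Illustrative only: line pitch as derived in sh_mobile_lcdc_set_par().
 * The virtual width determines the line length in memory, which is what
 * makes panning in X on a wider-than-visible buffer possible. */
static unsigned long lcdc_line_pitch(unsigned long xres_virtual,
                                     unsigned long bpp, int is_yuv)
{
        if (is_yuv)
                return xres_virtual;            /* semi-planar: 1 byte per luma pixel */
        return xres_virtual * bpp / 8;          /* packed RGB */
}

For example, lcdc_line_pitch(1280, 16, 0) gives 2560 bytes per line for an RGB565 buffer with a 1280-pixel virtual width.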
*/ if (!info->fbdefio) { - sh_mobile_wait_for_vsync(ch); - sh_mobile_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); } sh_mobile_lcdc_clk_off(p); } @@ -1402,12 +2102,12 @@ static struct fb_ops sh_mobile_lcdc_ops = { .fb_copyarea = sh_mobile_lcdc_copyarea, .fb_imageblit = sh_mobile_lcdc_imageblit, .fb_blank = sh_mobile_lcdc_blank, - .fb_pan_display = sh_mobile_fb_pan_display, - .fb_ioctl = sh_mobile_ioctl, - .fb_open = sh_mobile_open, - .fb_release = sh_mobile_release, - .fb_check_var = sh_mobile_check_var, - .fb_set_par = sh_mobile_set_par, + .fb_pan_display = sh_mobile_lcdc_pan, + .fb_ioctl = sh_mobile_lcdc_ioctl, + .fb_open = sh_mobile_lcdc_open, + .fb_release = sh_mobile_lcdc_release, + .fb_check_var = sh_mobile_lcdc_check_var, + .fb_set_par = sh_mobile_lcdc_set_par, }; static void @@ -1514,19 +2214,24 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else info->fix.visual = FB_VISUAL_TRUECOLOR; - if (ch->format->fourcc == V4L2_PIX_FMT_NV12 || - ch->format->fourcc == V4L2_PIX_FMT_NV21) + switch (ch->format->fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: info->fix.ypanstep = 2; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + info->fix.xpanstep = 2; + } /* Initialize variable screen information using the first mode as - * default. The default Y virtual resolution is twice the panel size to - * allow for double-buffering. + * default. */ var = &info->var; fb_videomode_to_var(var, mode); var->width = ch->cfg->panel_cfg.width; var->height = ch->cfg->panel_cfg.height; - var->yres_virtual = var->yres * 2; + var->xres_virtual = ch->xres_virtual; + var->yres_virtual = ch->yres_virtual; var->activate = FB_ACTIVATE_NOW; /* Use the legacy API by default for RGB formats, and the FOURCC API @@ -1537,7 +2242,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else var->grayscale = ch->format->fourcc; - ret = sh_mobile_check_var(var, info); + ret = sh_mobile_lcdc_check_var(var, info); if (ret) return ret; @@ -1712,15 +2417,27 @@ static const struct fb_videomode default_720p __devinitconst = { static int sh_mobile_lcdc_remove(struct platform_device *pdev) { struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev); - int i; + unsigned int i; fb_unregister_client(&priv->notifier); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) + sh_mobile_lcdc_overlay_fb_unregister(&priv->overlays[i]); for (i = 0; i < ARRAY_SIZE(priv->ch); i++) sh_mobile_lcdc_channel_fb_unregister(&priv->ch[i]); sh_mobile_lcdc_stop(priv); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + sh_mobile_lcdc_overlay_fb_cleanup(ovl); + + if (ovl->fb_mem) + dma_free_coherent(&pdev->dev, ovl->fb_size, + ovl->fb_mem, ovl->dma_handle); + } + for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; @@ -1737,8 +2454,11 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) } for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { - if (priv->ch[i].bl) - sh_mobile_lcdc_bl_remove(priv->ch[i].bl); + struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; + + if (ch->bl) + sh_mobile_lcdc_bl_remove(ch->bl); + mutex_destroy(&ch->open_lock); } if (priv->dot_clk) { @@ -1796,6 +2516,61 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan * } static int __devinit +sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv, + struct sh_mobile_lcdc_overlay *ovl) +{ + const struct sh_mobile_lcdc_format_info *format; + int ret; + + if 
(ovl->cfg->fourcc == 0) + return 0; + + /* Validate the format. */ + format = sh_mobile_format_info(ovl->cfg->fourcc); + if (format == NULL) { + dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc); + return -EINVAL; + } + + ovl->enabled = false; + ovl->mode = LCDC_OVERLAY_BLEND; + ovl->alpha = 255; + ovl->rop3 = 0; + ovl->pos_x = 0; + ovl->pos_y = 0; + + /* The default Y virtual resolution is twice the panel size to allow for + * double-buffering. + */ + ovl->format = format; + ovl->xres = ovl->cfg->max_xres; + ovl->xres_virtual = ovl->xres; + ovl->yres = ovl->cfg->max_yres; + ovl->yres_virtual = ovl->yres * 2; + + if (!format->yuv) + ovl->pitch = ovl->xres_virtual * format->bpp / 8; + else + ovl->pitch = ovl->xres_virtual; + + /* Allocate frame buffer memory. */ + ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres + * format->bpp / 8 * 2; + ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size, + &ovl->dma_handle, GFP_KERNEL); + if (!ovl->fb_mem) { + dev_err(priv->dev, "unable to allocate buffer\n"); + return -ENOMEM; + } + + ret = sh_mobile_lcdc_overlay_fb_init(ovl); + if (ret < 0) + return ret; + + return 0; +} + +static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, struct sh_mobile_lcdc_chan *ch) { @@ -1854,7 +2629,9 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, num_modes = cfg->num_modes; } - /* Use the first mode as default. */ + /* Use the first mode as default. The default Y virtual resolution is + * twice the panel size to allow for double-buffering. + */ ch->format = format; ch->xres = mode->xres; ch->xres_virtual = mode->xres; @@ -1863,10 +2640,10 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, if (!format->yuv) { ch->colorspace = V4L2_COLORSPACE_SRGB; - ch->pitch = ch->xres * format->bpp / 8; + ch->pitch = ch->xres_virtual * format->bpp / 8; } else { ch->colorspace = V4L2_COLORSPACE_REC709; - ch->pitch = ch->xres; + ch->pitch = ch->xres_virtual; } ch->display.width = cfg->panel_cfg.width; @@ -1952,7 +2729,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) } init_waitqueue_head(&ch->frame_end_wait); init_completion(&ch->vsync_completion); - ch->pan_offset = 0; /* probe the backlight is there is one defined */ if (ch->cfg->bl_info.max_brightness) @@ -2003,6 +2779,17 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + ovl->cfg = &pdata->overlays[i]; + ovl->channel = &priv->ch[0]; + + error = sh_mobile_lcdc_overlay_init(priv, ovl); + if (error) + goto err1; + } + error = sh_mobile_lcdc_start(priv); if (error) { dev_err(&pdev->dev, "unable to start hardware\n"); @@ -2017,6 +2804,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + error = sh_mobile_lcdc_overlay_fb_register(ovl); + if (error) + goto err1; + } + /* Failure ignored */ priv->notifier.notifier_call = sh_mobile_lcdc_notify; fb_register_client(&priv->notifier); diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h index 5c3bddd2cb72..0f92f6544b94 100644 --- a/drivers/video/sh_mobile_lcdcfb.h +++ b/drivers/video/sh_mobile_lcdcfb.h @@ -47,6 +47,7 @@ struct sh_mobile_lcdc_entity { /* * struct sh_mobile_lcdc_chan - LCDC display channel * + * @pan_y_offset: Panning linear offset in bytes 
(luma component) * @base_addr_y: Frame buffer viewport base address (luma component) * @base_addr_c: Frame buffer viewport base address (chroma component) * @pitch: Frame buffer line pitch @@ -59,7 +60,7 @@ struct sh_mobile_lcdc_chan { unsigned long *reg_offs; unsigned long ldmt1r_value; unsigned long enabled; /* ME and SE in LDCNT2R */ - void *meram; + void *cache; struct mutex open_lock; /* protects the use counter */ int use_count; @@ -68,7 +69,7 @@ struct sh_mobile_lcdc_chan { unsigned long fb_size; dma_addr_t dma_handle; - unsigned long pan_offset; + unsigned long pan_y_offset; unsigned long frame_end; wait_queue_head_t frame_end_wait; diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c index 82ba830bf95d..7a0ba8bb3fbe 100644 --- a/drivers/video/sh_mobile_meram.c +++ b/drivers/video/sh_mobile_meram.c @@ -11,6 +11,7 @@ #include <linux/device.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/kernel.h> @@ -194,13 +195,28 @@ static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off) } /* ----------------------------------------------------------------------------- - * Allocation + * MERAM allocation and free + */ + +static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size) +{ + return gen_pool_alloc(priv->pool, size); +} + +static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem, + size_t size) +{ + gen_pool_free(priv->pool, mem, size); +} + +/* ----------------------------------------------------------------------------- + * LCDC cache planes allocation, init, cleanup and free */ /* Allocate ICBs and MERAM for a plane. */ -static int __meram_alloc(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane, - size_t size) +static int meram_plane_alloc(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane, + size_t size) { unsigned long mem; unsigned long idx; @@ -215,7 +231,7 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv, return -ENOMEM; plane->marker = &priv->icbs[idx]; - mem = gen_pool_alloc(priv->pool, size * 1024); + mem = meram_alloc(priv, size * 1024); if (mem == 0) return -ENOMEM; @@ -229,11 +245,11 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv, } /* Free ICBs and MERAM for a plane. */ -static void __meram_free(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane) +static void meram_plane_free(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane) { - gen_pool_free(priv->pool, priv->meram + plane->marker->offset, - plane->marker->size * 1024); + meram_free(priv, priv->meram + plane->marker->offset, + plane->marker->size * 1024); __clear_bit(plane->marker->index, &priv->used_icb); __clear_bit(plane->cache->index, &priv->used_icb); @@ -248,62 +264,6 @@ static int is_nvcolor(int cspace) return 0; } -/* Allocate memory for the ICBs and mark them as used. */ -static struct sh_mobile_meram_fb_cache * -meram_alloc(struct sh_mobile_meram_priv *priv, - const struct sh_mobile_meram_cfg *cfg, - int pixelformat) -{ - struct sh_mobile_meram_fb_cache *cache; - unsigned int nplanes = is_nvcolor(pixelformat) ? 
2 : 1; - int ret; - - if (cfg->icb[0].meram_size == 0) - return ERR_PTR(-EINVAL); - - if (nplanes == 2 && cfg->icb[1].meram_size == 0) - return ERR_PTR(-EINVAL); - - cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (cache == NULL) - return ERR_PTR(-ENOMEM); - - cache->nplanes = nplanes; - - ret = __meram_alloc(priv, &cache->planes[0], cfg->icb[0].meram_size); - if (ret < 0) - goto error; - - cache->planes[0].marker->current_reg = 1; - cache->planes[0].marker->pixelformat = pixelformat; - - if (cache->nplanes == 1) - return cache; - - ret = __meram_alloc(priv, &cache->planes[1], cfg->icb[1].meram_size); - if (ret < 0) { - __meram_free(priv, &cache->planes[0]); - goto error; - } - - return cache; - -error: - kfree(cache); - return ERR_PTR(-ENOMEM); -} - -/* Unmark the specified ICB as used. */ -static void meram_free(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_cache *cache) -{ - __meram_free(priv, &cache->planes[0]); - if (cache->nplanes == 2) - __meram_free(priv, &cache->planes[1]); - - kfree(cache); -} - /* Set the next address to fetch. */ static void meram_set_next_addr(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_cache *cache, @@ -355,10 +315,10 @@ meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1)) /* Initialize MERAM. */ -static int meram_init(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane, - unsigned int xres, unsigned int yres, - unsigned int *out_pitch) +static int meram_plane_init(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane, + unsigned int xres, unsigned int yres, + unsigned int *out_pitch) { struct sh_mobile_meram_icb *marker = plane->marker; unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres); @@ -427,8 +387,8 @@ static int meram_init(struct sh_mobile_meram_priv *priv, return 0; } -static void meram_deinit(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane) +static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane) { /* disable ICB */ meram_write_icb(priv->base, plane->cache->index, MExxCTL, @@ -441,20 +401,82 @@ static void meram_deinit(struct sh_mobile_meram_priv *priv, } /* ----------------------------------------------------------------------------- - * Registration/unregistration + * MERAM operations */ -static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, - const struct sh_mobile_meram_cfg *cfg, - unsigned int xres, unsigned int yres, - unsigned int pixelformat, - unsigned int *pitch) +unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata, + size_t size) +{ + struct sh_mobile_meram_priv *priv = pdata->priv; + + return meram_alloc(priv, size); +} +EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc); + +void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem, + size_t size) +{ + struct sh_mobile_meram_priv *priv = pdata->priv; + + meram_free(priv, mem, size); +} +EXPORT_SYMBOL_GPL(sh_mobile_meram_free); + +/* Allocate memory for the ICBs and mark them as used. */ +static struct sh_mobile_meram_fb_cache * +meram_cache_alloc(struct sh_mobile_meram_priv *priv, + const struct sh_mobile_meram_cfg *cfg, + int pixelformat) +{ + unsigned int nplanes = is_nvcolor(pixelformat) ? 
2 : 1; + struct sh_mobile_meram_fb_cache *cache; + int ret; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); + if (cache == NULL) + return ERR_PTR(-ENOMEM); + + cache->nplanes = nplanes; + + ret = meram_plane_alloc(priv, &cache->planes[0], + cfg->icb[0].meram_size); + if (ret < 0) + goto error; + + cache->planes[0].marker->current_reg = 1; + cache->planes[0].marker->pixelformat = pixelformat; + + if (cache->nplanes == 1) + return cache; + + ret = meram_plane_alloc(priv, &cache->planes[1], + cfg->icb[1].meram_size); + if (ret < 0) { + meram_plane_free(priv, &cache->planes[0]); + goto error; + } + + return cache; + +error: + kfree(cache); + return ERR_PTR(-ENOMEM); +} + +void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata, + const struct sh_mobile_meram_cfg *cfg, + unsigned int xres, unsigned int yres, + unsigned int pixelformat, unsigned int *pitch) { struct sh_mobile_meram_fb_cache *cache; struct sh_mobile_meram_priv *priv = pdata->priv; struct platform_device *pdev = pdata->pdev; + unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1; unsigned int out_pitch; + if (priv == NULL) + return ERR_PTR(-ENODEV); + if (pixelformat != SH_MOBILE_MERAM_PF_NV && pixelformat != SH_MOBILE_MERAM_PF_NV24 && pixelformat != SH_MOBILE_MERAM_PF_RGB) @@ -469,10 +491,16 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, return ERR_PTR(-EINVAL); } + if (cfg->icb[0].meram_size == 0) + return ERR_PTR(-EINVAL); + + if (nplanes == 2 && cfg->icb[1].meram_size == 0) + return ERR_PTR(-EINVAL); + mutex_lock(&priv->lock); /* We now register the ICBs and allocate the MERAM regions. */ - cache = meram_alloc(priv, cfg, pixelformat); + cache = meram_cache_alloc(priv, cfg, pixelformat); if (IS_ERR(cache)) { dev_err(&pdev->dev, "MERAM allocation failed (%ld).", PTR_ERR(cache)); @@ -480,42 +508,50 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, } /* initialize MERAM */ - meram_init(priv, &cache->planes[0], xres, yres, &out_pitch); + meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch); *pitch = out_pitch; if (pixelformat == SH_MOBILE_MERAM_PF_NV) - meram_init(priv, &cache->planes[1], xres, (yres + 1) / 2, - &out_pitch); + meram_plane_init(priv, &cache->planes[1], + xres, (yres + 1) / 2, &out_pitch); else if (pixelformat == SH_MOBILE_MERAM_PF_NV24) - meram_init(priv, &cache->planes[1], 2 * xres, (yres + 1) / 2, - &out_pitch); + meram_plane_init(priv, &cache->planes[1], + 2 * xres, (yres + 1) / 2, &out_pitch); err: mutex_unlock(&priv->lock); return cache; } +EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc); -static void -sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata, void *data) +void +sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data) { struct sh_mobile_meram_fb_cache *cache = data; struct sh_mobile_meram_priv *priv = pdata->priv; mutex_lock(&priv->lock); - /* deinit & free */ - meram_deinit(priv, &cache->planes[0]); - if (cache->nplanes == 2) - meram_deinit(priv, &cache->planes[1]); + /* Cleanup and free. 
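The meram_alloc()/meram_free() helpers introduced above are thin wrappers around the genalloc pool that backs the MERAM block; the kernel-style sketch below (hypothetical function names, error handling trimmed) shows the underlying gen_pool usage pattern that they, and the newly exported sh_mobile_meram_alloc()/sh_mobile_meram_free(), rely on.

#include <linux/genalloc.h>

/* Hypothetical example: carve a block out of an existing pool and hand it
 * back later, as meram_alloc()/meram_free() do for MERAM. */
static unsigned long example_carve(struct gen_pool *pool, size_t size)
{
        unsigned long mem = gen_pool_alloc(pool, size);

        if (!mem)               /* gen_pool_alloc() returns 0 on failure */
                return 0;
        /* ... point the hardware at 'mem' here ... */
        return mem;
}

static void example_release(struct gen_pool *pool, unsigned long mem,
                            size_t size)
{
        /* The same size passed to gen_pool_alloc() must be passed back. */
        gen_pool_free(pool, mem, size);
}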
*/ + meram_plane_cleanup(priv, &cache->planes[0]); + meram_plane_free(priv, &cache->planes[0]); + + if (cache->nplanes == 2) { + meram_plane_cleanup(priv, &cache->planes[1]); + meram_plane_free(priv, &cache->planes[1]); + } - meram_free(priv, cache); + kfree(cache); mutex_unlock(&priv->lock); } - -static void -sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data, - unsigned long base_addr_y, unsigned long base_addr_c, - unsigned long *icb_addr_y, unsigned long *icb_addr_c) +EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free); + +void +sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data, + unsigned long base_addr_y, + unsigned long base_addr_c, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c) { struct sh_mobile_meram_fb_cache *cache = data; struct sh_mobile_meram_priv *priv = pdata->priv; @@ -527,13 +563,7 @@ sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data, mutex_unlock(&priv->lock); } - -static struct sh_mobile_meram_ops sh_mobile_meram_ops = { - .module = THIS_MODULE, - .meram_register = sh_mobile_meram_register, - .meram_unregister = sh_mobile_meram_unregister, - .meram_update = sh_mobile_meram_update, -}; +EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update); /* ----------------------------------------------------------------------------- * Power management @@ -624,7 +654,6 @@ static int __devinit sh_mobile_meram_probe(struct platform_device *pdev) for (i = 0; i < MERAM_ICB_NUM; ++i) priv->icbs[i].index = i; - pdata->ops = &sh_mobile_meram_ops; pdata->priv = priv; pdata->pdev = pdev; diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c index 26f864289498..5533a32c6ca1 100644 --- a/drivers/video/smscufx.c +++ b/drivers/video/smscufx.c @@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf, result = fb_sys_write(info, buf, count, ppos); if (result > 0) { - int start = max((int)(offset / info->fix.line_length) - 1, 0); + int start = max((int)(offset / info->fix.line_length), 0); int lines = min((u32)((result / info->fix.line_length) + 1), (u32)info->var.yres); diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index 90a2e30272ad..2f6b2b835f88 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c @@ -1567,6 +1567,18 @@ static void w100_suspend(u32 mode) val = readl(remapped_regs + mmPLL_CNTL); val |= 0x00000004; /* bit2=1 */ writel(val, remapped_regs + mmPLL_CNTL); + + writel(0x00000000, remapped_regs + mmLCDD_CNTL1); + writel(0x00000000, remapped_regs + mmLCDD_CNTL2); + writel(0x00000000, remapped_regs + mmGENLCD_CNTL1); + writel(0x00000000, remapped_regs + mmGENLCD_CNTL2); + writel(0x00000000, remapped_regs + mmGENLCD_CNTL3); + + val = readl(remapped_regs + mmMEM_EXT_CNTL); + val |= 0xF0000000; + val &= ~(0x00000001); + writel(val, remapped_regs + mmMEM_EXT_CNTL); + writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL); } } diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index d90062b211f8..92d08e7fcba2 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -91,6 +91,11 @@ static struct w1_family w1_therm_family_DS28EA00 = { .fops = &w1_therm_fops, }; +static struct w1_family w1_therm_family_DS1825 = { + .fid = W1_THERM_DS1825, + .fops = &w1_therm_fops, +}; + struct w1_therm_family_converter { u8 broken; @@ -120,6 +125,10 @@ static struct w1_therm_family_converter w1_therm_families[] = { .f = &w1_therm_family_DS28EA00, .convert = w1_DS18B20_convert_temp }, + { + .f = &w1_therm_family_DS1825, + .convert = 
w1_DS18B20_convert_temp + } }; static inline int w1_DS18B20_convert_temp(u8 rom[9]) diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index b00ada44a89b..a1f0ce151d53 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -39,6 +39,7 @@ #define W1_EEPROM_DS2431 0x2D #define W1_FAMILY_DS2760 0x30 #define W1_FAMILY_DS2780 0x32 +#define W1_THERM_DS1825 0x3B #define W1_FAMILY_DS2781 0x3D #define W1_THERM_DS28EA00 0x42 diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index a73bea4aa1ba..c20f96b579d9 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -24,6 +24,7 @@ #include <linux/spinlock.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/of.h> #include <mach/bridge-regs.h> /* @@ -192,6 +193,12 @@ static void orion_wdt_shutdown(struct platform_device *pdev) orion_wdt_stop(&orion_wdt); } +static const struct of_device_id orion_wdt_of_match_table[] __devinitdata = { + { .compatible = "marvell,orion-wdt", }, + {}, +}; +MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table); + static struct platform_driver orion_wdt_driver = { .probe = orion_wdt_probe, .remove = __devexit_p(orion_wdt_remove), @@ -199,6 +206,7 @@ static struct platform_driver orion_wdt_driver = { .driver = { .owner = THIS_MODULE, .name = "orion_wdt", + .of_match_table = of_match_ptr(orion_wdt_of_match_table), }, }; diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index 181fa8158a8b..858c9714b2f3 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c @@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; */ struct zorro_bus { - struct list_head devices; /* list of devices on this bus */ struct device dev; }; @@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) if (!bus) return -ENOMEM; - INIT_LIST_HEAD(&bus->devices); bus->dev.parent = &pdev->dev; dev_set_name(&bus->dev, "zorro"); error = device_register(&bus->dev); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index fc06fd27065e..dd6f7ee1e312 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -610,6 +610,9 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n", page, (unsigned long)filp->private_data); + /* Update file times before taking page lock */ + file_update_time(filp); + v9inode = V9FS_I(inode); /* make sure the cache has finished storing the page */ v9fs_fscache_wait_on_page_write(inode, page); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 1feb68ecef95..842d00048a65 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -94,25 +94,21 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev, { struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); struct list_head *next; - struct dentry *p, *q; + struct dentry *q; spin_lock(&sbi->lookup_lock); + spin_lock(&root->d_lock); - if (prev == NULL) { - spin_lock(&root->d_lock); + if (prev) + next = prev->d_u.d_child.next; + else { prev = dget_dlock(root); next = prev->d_subdirs.next; - p = prev; - goto start; } - p = prev; - spin_lock(&p->d_lock); -again: - next = p->d_u.d_child.next; -start: +cont: if (next == &root->d_subdirs) { - spin_unlock(&p->d_lock); + spin_unlock(&root->d_lock); spin_unlock(&sbi->lookup_lock); dput(prev); return NULL; @@ -121,16 +117,15 @@ start: q = list_entry(next, struct dentry, d_u.d_child); spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED); - /* Negative dentry - try next */ - if (!simple_positive(q)) { - spin_unlock(&p->d_lock); - 
lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_); - p = q; - goto again; + /* Already gone or negative dentry (under construction) - try next */ + if (q->d_count == 0 || !simple_positive(q)) { + spin_unlock(&q->d_lock); + next = q->d_u.d_child.next; + goto cont; } dget_dlock(q); spin_unlock(&q->d_lock); - spin_unlock(&p->d_lock); + spin_unlock(&root->d_lock); spin_unlock(&sbi->lookup_lock); dput(prev); @@ -404,11 +399,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, DPRINTK("checking mountpoint %p %.*s", dentry, (int)dentry->d_name.len, dentry->d_name.name); - /* Path walk currently on this dentry? */ - ino_count = atomic_read(&ino->count) + 2; - if (dentry->d_count > ino_count) - goto next; - /* Can we umount this guy */ if (autofs4_mount_busy(mnt, dentry)) goto next; @@ -1312,7 +1312,7 @@ EXPORT_SYMBOL(bio_copy_kern); * Note that this code is very hard to test under normal circumstances because * direct-io pins the pages with get_user_pages(). This makes * is_page_cache_freeable return false, and the VM will not clean the pages. - * But other code (eg, pdflush) could clean the pages if they are mapped + * But other code (eg, flusher threads) could clean the pages if they are mapped * pagecache. * * Simply disabling the call to bio_set_pages_dirty() is a good way to test the diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fadeba6a5db9..62e0cafd6e25 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1614,8 +1614,6 @@ static int cleaner_kthread(void *arg) struct btrfs_root *root = arg; do { - vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); - if (!(root->fs_info->sb->s_flags & MS_RDONLY) && mutex_trylock(&root->fs_info->cleaner_mutex)) { btrfs_run_delayed_iputs(root); @@ -1647,7 +1645,6 @@ static int transaction_kthread(void *arg) do { cannot_commit = false; delay = HZ * 30; - vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); mutex_lock(&root->fs_info->transaction_kthread_mutex); spin_lock(&root->fs_info->trans_lock); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 9aa01ec2138d..5caf285c6e4d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1379,7 +1379,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ssize_t err = 0; size_t count, ocount; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); @@ -1469,6 +1469,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, num_written = err; } out: + sb_end_write(inode->i_sb); current->backing_dev_info = NULL; return num_written ? num_written : err; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 48bdfd2591c2..6e8f416773d4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -324,7 +324,8 @@ static noinline int add_async_extent(struct async_cow *cow, * If this code finds it can't get good compression, it puts an * entry onto the work queue to write the uncompressed bytes. This * makes sure that both compressed inodes and uncompressed inodes - * are written in the same order that pdflush sent them down. + * are written in the same order that the flusher thread sent them + * down. 
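The btrfs hunks above replace the old vfs_check_frozen() calls with the paired freeze-protection API: sb_start_write()/sb_end_write() around writes and sb_start_pagefault()/sb_end_pagefault() around page faults. A rough sketch of the pattern for a hypothetical filesystem using stock VFS helpers follows; example_get_block() is a stand-in for the filesystem's own get_block callback, and the mirroring block_page_mkwrite() rework appears further down in fs/buffer.c.

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create);
                             /* fs-specific, assumed */

/* ->aio_write: hold write-level freeze protection for the whole write,
 * as btrfs_file_aio_write() now does. */
static ssize_t example_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                 unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        ssize_t ret;

        sb_start_write(inode->i_sb);    /* waits here while the fs is frozen */
        ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
        sb_end_write(inode->i_sb);
        return ret;
}

/* ->page_mkwrite: page faults take the lighter page-fault level. */
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
        int ret;

        sb_start_pagefault(sb);
        ret = __block_page_mkwrite(vma, vmf, example_get_block);
        sb_end_pagefault(sb);
        return block_page_mkwrite_return(ret);
}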
*/ static noinline int compress_file_range(struct inode *inode, struct page *locked_page, @@ -6629,6 +6630,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) u64 page_start; u64 page_end; + sb_start_pagefault(inode->i_sb); ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (!ret) { ret = file_update_time(vma->vm_file); @@ -6718,12 +6720,15 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); out_unlock: - if (!ret) + if (!ret) { + sb_end_pagefault(inode->i_sb); return VM_FAULT_LOCKED; + } unlock_page(page); out: btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); out_noreserve: + sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 43f0012016e3..7bb755677a22 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -195,6 +195,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) if (!inode_owner_or_capable(inode)) return -EACCES; + ret = mnt_want_write_file(file); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); ip_oldflags = ip->flags; @@ -209,10 +213,6 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) } } - ret = mnt_want_write_file(file); - if (ret) - goto out_unlock; - if (flags & FS_SYNC_FL) ip->flags |= BTRFS_INODE_SYNC; else @@ -275,9 +275,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) inode->i_flags = i_oldflags; } - mnt_drop_write_file(file); out_unlock: mutex_unlock(&inode->i_mutex); + mnt_drop_write_file(file); return ret; } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 643335a4fe3c..051c7fe551dd 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode, /* * pages in the range can be dirty, clean or writeback. We * start IO on any dirty ones so the wait doesn't stall waiting - * for pdflush to find them + * for the flusher thread to find them */ if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) filemap_fdatawrite_range(inode->i_mapping, start, end); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 8c6e61d6eed5..f2eb24c477a3 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -100,10 +100,6 @@ static void __save_error_info(struct btrfs_fs_info *fs_info) fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR; } -/* NOTE: - * We move write_super stuff at umount in order to avoid deadlock - * for umount hold all lock. 
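One detail worth noting in the btrfs_ioctl_setflags() hunk above: mnt_want_write_file() is moved in front of the i_mutex lock and mnt_drop_write_file() after the unlock, presumably so the write/freeze protection it takes always ranks outside i_mutex. A minimal sketch of the resulting ordering, as a hypothetical ioctl handler:

#include <linux/fs.h>
#include <linux/mount.h>

static int example_ioctl_setflags(struct file *file, void __user *arg)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        int ret;

        ret = mnt_want_write_file(file);        /* take write access first */
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);
        /* ... validate and update the inode flags here ... */
        mutex_unlock(&inode->i_mutex);

        mnt_drop_write_file(file);              /* release in reverse order */
        return 0;
}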
- */ static void save_error_info(struct btrfs_fs_info *fs_info) { __save_error_info(fs_info); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 7ac7cdcc294e..17be3dedacba 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -335,6 +335,8 @@ again: if (!h) return ERR_PTR(-ENOMEM); + sb_start_intwrite(root->fs_info->sb); + if (may_wait_transaction(root, type)) wait_current_trans(root); @@ -345,6 +347,7 @@ again: } while (ret == -EBUSY); if (ret < 0) { + sb_end_intwrite(root->fs_info->sb); kmem_cache_free(btrfs_trans_handle_cachep, h); return ERR_PTR(ret); } @@ -548,6 +551,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); trans->block_rsv = NULL; + sb_end_intwrite(root->fs_info->sb); + if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && should_end_transaction(trans, root)) { trans->transaction->blocked = 1; @@ -1578,6 +1583,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, put_transaction(cur_trans); put_transaction(cur_trans); + sb_end_intwrite(root->fs_info->sb); + trace_btrfs_transaction_commit(root); btrfs_scrub_continue(root); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b8708f994e67..e86ae04abe6a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1744,10 +1744,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->fs_devices = root->fs_info->fs_devices; - /* - * we don't want write_supers to jump in here with our device - * half setup - */ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); list_add(&device->dev_alloc_list, diff --git a/fs/buffer.c b/fs/buffer.c index c7062c896d7c..9f6d2e41281d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2306,8 +2306,8 @@ EXPORT_SYMBOL(block_commit_write); * beyond EOF, then the page is guaranteed safe against truncation until we * unlock the page. * - * Direct callers of this function should call vfs_check_frozen() so that page - * fault does not busyloop until the fs is thawed. + * Direct callers of this function should protect against filesystem freezing + * using sb_start_write() - sb_end_write() functions. */ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) @@ -2318,6 +2318,12 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, loff_t size; int ret; + /* + * Update file times before taking page lock. We may end up failing the + * fault so this update may be superfluous but who really cares... + */ + file_update_time(vma->vm_file); + lock_page(page); size = i_size_read(inode); if ((page->mapping != inode->i_mapping) || @@ -2339,18 +2345,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, if (unlikely(ret < 0)) goto out_unlock; - /* - * Freezing in progress? We check after the page is marked dirty and - * with page lock held so if the test here fails, we are sure freezing - * code will wait during syncing until the page fault is done - at that - * point page will be dirty and unlocked so freezing code will write it - * and writeprotect it again. 
- */ set_page_dirty(page); - if (inode->i_sb->s_frozen != SB_UNFROZEN) { - ret = -EAGAIN; - goto out_unlock; - } wait_on_page_writeback(page); return 0; out_unlock: @@ -2365,12 +2360,9 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, int ret; struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb; - /* - * This check is racy but catches the common case. The check in - * __block_page_mkwrite() is reliable. - */ - vfs_check_frozen(sb, SB_FREEZE_WRITE); + sb_start_pagefault(sb); ret = __block_page_mkwrite(vma, vmf, get_block); + sb_end_pagefault(sb); return block_page_mkwrite_return(ret); } EXPORT_SYMBOL(block_page_mkwrite); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 8b67304e4b80..452e71a1b753 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1184,6 +1184,9 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) loff_t size, len; int ret; + /* Update time before taking page lock */ + file_update_time(vma->vm_file); + size = i_size_read(inode); if (off + PAGE_CACHE_SIZE <= size) len = PAGE_CACHE_SIZE; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f391f1e75414..e5b77319c97b 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -633,44 +633,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, return dentry; } -int ceph_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, umode_t mode, - int *opened) -{ - int err; - struct dentry *res = NULL; - - if (!(flags & O_CREAT)) { - if (dentry->d_name.len > NAME_MAX) - return -ENAMETOOLONG; - - err = ceph_init_dentry(dentry); - if (err < 0) - return err; - - return ceph_lookup_open(dir, dentry, file, flags, mode, opened); - } - - if (d_unhashed(dentry)) { - res = ceph_lookup(dir, dentry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - if (res) - dentry = res; - } - - /* We don't deal with positive dentries here */ - if (dentry->d_inode) - return finish_no_open(file, res); - - *opened |= FILE_CREATED; - err = ceph_lookup_open(dir, dentry, file, flags, mode, opened); - dput(res); - - return err; -} - /* * If we do a create but get no trace back from the MDS, follow up with * a lookup (the VFS expects us to link up the provided dentry). diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 1b81d6c31878..ecebbc09bfc7 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -4,6 +4,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/file.h> +#include <linux/mount.h> #include <linux/namei.h> #include <linux/writeback.h> @@ -106,9 +107,6 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) } /* - * If the filp already has private_data, that means the file was - * already opened by intent during lookup, and we do nothing. - * * If we already have the requisite capabilities, we can satisfy * the open request locally (no need to request new caps from the * MDS). We do, however, need to inform the MDS (asynchronously) @@ -207,24 +205,29 @@ out: /* - * Do a lookup + open with a single request. - * - * If this succeeds, but some subsequent check in the vfs - * may_open() fails, the struct *file gets cleaned up (i.e. - * ceph_release gets called). So fear not! + * Do a lookup + open with a single request. If we get a non-existent + * file or symlink, return 1 so the VFS can retry. 
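ceph's ->atomic_open() support is being consolidated here: the thin wrapper is removed from fs/ceph/dir.c above, and ceph_lookup_open() is renamed and reworked below to issue a single MDS request and then choose between finish_open() and finish_no_open() itself. A stripped-down sketch of that decision for a hypothetical filesystem using the 3.6-era VFS helpers, where example_do_lookup() and example_open() are stand-ins:

static int example_atomic_open(struct inode *dir, struct dentry *dentry,
                               struct file *file, unsigned flags,
                               umode_t mode, int *opened)
{
        struct dentry *dn;

        dn = example_do_lookup(dir, dentry, flags, mode);       /* stand-in */
        if (IS_ERR(dn))
                return PTR_ERR(dn);

        /* Splice, negative dentry or symlink: hand back to the VFS so it
         * can retry the normal lookup/open path. */
        if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode))
                return finish_no_open(file, dn);

        /* Otherwise complete the open on the dentry we resolved. */
        return finish_open(file, dentry, example_open, opened); /* stand-in ->open() */
}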
*/ -int ceph_lookup_open(struct inode *dir, struct dentry *dentry, +int ceph_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned flags, umode_t mode, int *opened) { struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_mds_request *req; - struct dentry *ret; + struct dentry *dn; int err; - dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n", - dentry, dentry->d_name.len, dentry->d_name.name, flags, mode); + dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n", + dir, dentry, dentry->d_name.len, dentry->d_name.name, + d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); + + if (dentry->d_name.len > NAME_MAX) + return -ENAMETOOLONG; + + err = ceph_init_dentry(dentry); + if (err < 0) + return err; /* do the open */ req = prepare_open_request(dir->i_sb, flags, mode); @@ -241,22 +244,31 @@ int ceph_lookup_open(struct inode *dir, struct dentry *dentry, (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, req); err = ceph_handle_snapdir(req, dentry, err); - if (err) - goto out; - if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) + if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); - if (err) - goto out; - err = finish_open(file, req->r_dentry, ceph_open, opened); -out: - ret = ceph_finish_lookup(req, dentry, err); - ceph_mdsc_put_request(req); - dout("ceph_lookup_open result=%p\n", ret); - if (IS_ERR(ret)) - return PTR_ERR(ret); + if (d_unhashed(dentry)) { + dn = ceph_finish_lookup(req, dentry, err); + if (IS_ERR(dn)) + err = PTR_ERR(dn); + } else { + /* we were given a hashed negative dentry */ + dn = NULL; + } + if (err) + goto out_err; + if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) { + /* make vfs retry on splice, ENOENT, or symlink */ + dout("atomic_open finish_no_open on dn %p\n", dn); + err = finish_no_open(file, dn); + } else { + dout("atomic_open finish_open on dn %p\n", dn); + err = finish_open(file, dentry, ceph_open, opened); + } - dput(ret); +out_err: + ceph_mdsc_put_request(req); + dout("atomic_open result=%d\n", err); return err; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ebc95cc652be..66ebe720e40d 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -806,9 +806,9 @@ extern int ceph_copy_from_page_vector(struct page **pages, loff_t off, size_t len); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); extern int ceph_open(struct inode *inode, struct file *file); -extern int ceph_lookup_open(struct inode *dir, struct dentry *dentry, - struct file *od, unsigned flags, - umode_t mode, int *opened); +extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, umode_t mode, + int *opened); extern int ceph_release(struct inode *inode, struct file *filp); /* dir.c */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 497da5ce704c..977dc0e85ccb 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -246,6 +246,16 @@ struct smb_version_operations { bool (*can_echo)(struct TCP_Server_Info *); /* send echo request */ int (*echo)(struct TCP_Server_Info *); + /* create directory */ + int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *, + struct cifs_sb_info *); + /* set info on created directory */ + void (*mkdir_setinfo)(struct inode *, const char *, + struct cifs_sb_info *, struct cifs_tcon *, + const unsigned int); + /* remove directory */ + int (*rmdir)(const unsigned int, struct 
cifs_tcon *, const char *, + struct cifs_sb_info *); }; struct smb_version_values { diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index cf7fb185103c..f1bbf8305d3a 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -289,18 +289,15 @@ extern int CIFSSMBUnixSetFileInfo(const unsigned int xid, u16 fid, u32 pid_of_opener); extern int CIFSSMBUnixSetPathInfo(const unsigned int xid, - struct cifs_tcon *tcon, char *file_name, + struct cifs_tcon *tcon, const char *file_name, const struct cifs_unix_set_info_args *args, const struct nls_table *nls_codepage, - int remap_special_chars); + int remap); extern int CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *newName, - const struct nls_table *nls_codepage, - int remap_special_chars); + const char *name, struct cifs_sb_info *cifs_sb); extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *name, const struct nls_table *nls_codepage, - int remap_special_chars); + const char *name, struct cifs_sb_info *cifs_sb); extern int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon, const char *name, __u16 type, const struct nls_table *nls_codepage, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index cabc7a01f5df..074923ce593d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -948,15 +948,15 @@ DelFileRetry: } int -CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *dirName, const struct nls_table *nls_codepage, - int remap) +CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) { DELETE_DIRECTORY_REQ *pSMB = NULL; DELETE_DIRECTORY_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; int name_len; + int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR; cFYI(1, "In CIFSSMBRmDir"); RmDirRetry: @@ -966,14 +966,15 @@ RmDirRetry: return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { - name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName, - PATH_MAX, nls_codepage, remap); + name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, + PATH_MAX, cifs_sb->local_nls, + remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ - name_len = strnlen(dirName, PATH_MAX); + name_len = strnlen(name, PATH_MAX); name_len++; /* trailing null */ - strncpy(pSMB->DirName, dirName, name_len); + strncpy(pSMB->DirName, name, name_len); } pSMB->BufferFormat = 0x04; @@ -992,14 +993,15 @@ RmDirRetry: } int -CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, - const char *name, const struct nls_table *nls_codepage, int remap) +CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) { int rc = 0; CREATE_DIRECTORY_REQ *pSMB = NULL; CREATE_DIRECTORY_RSP *pSMBr = NULL; int bytes_returned; int name_len; + int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR; cFYI(1, "In CIFSSMBMkDir"); MkDirRetry: @@ -1010,7 +1012,8 @@ MkDirRetry: if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name, - PATH_MAX, nls_codepage, remap); + PATH_MAX, cifs_sb->local_nls, + remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve check for buffer overruns BB */ @@ -5943,7 +5946,7 @@ CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, int CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, - char *fileName, + const char *file_name, const struct cifs_unix_set_info_args *args, const struct 
nls_table *nls_codepage, int remap) { @@ -5964,14 +5967,14 @@ setPermsRetry: if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = - cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName, + cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name, PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(fileName, PATH_MAX); + name_len = strnlen(file_name, PATH_MAX); name_len++; /* trailing null */ - strncpy(pSMB->FileName, fileName, name_len); + strncpy(pSMB->FileName, file_name, name_len); } params = 6 + name_len; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 35cb6a374a45..7354877fa3bd 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1219,16 +1219,153 @@ unlink_out: return rc; } +static int +cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode, + const char *full_path, struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, const unsigned int xid) +{ + int rc = 0; + struct inode *newinode = NULL; + + if (tcon->unix_ext) + rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, + xid); + else + rc = cifs_get_inode_info(&newinode, full_path, NULL, + inode->i_sb, xid, NULL); + if (rc) + return rc; + + d_instantiate(dentry, newinode); + /* + * setting nlink not necessary except in cases where we failed to get it + * from the server or was set bogus + */ + if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) + set_nlink(dentry->d_inode, 2); + + mode &= ~current_umask(); + /* must turn on setgid bit if parent dir has it */ + if (inode->i_mode & S_ISGID) + mode |= S_ISGID; + + if (tcon->unix_ext) { + struct cifs_unix_set_info_args args = { + .mode = mode, + .ctime = NO_CHANGE_64, + .atime = NO_CHANGE_64, + .mtime = NO_CHANGE_64, + .device = 0, + }; + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { + args.uid = (__u64)current_fsuid(); + if (inode->i_mode & S_ISGID) + args.gid = (__u64)inode->i_gid; + else + args.gid = (__u64)current_fsgid(); + } else { + args.uid = NO_CHANGE_64; + args.gid = NO_CHANGE_64; + } + CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); + } else { + struct TCP_Server_Info *server = tcon->ses->server; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && + (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) + server->ops->mkdir_setinfo(newinode, full_path, cifs_sb, + tcon, xid); + if (dentry->d_inode) { + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) + dentry->d_inode->i_mode = (mode | S_IFDIR); + + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { + dentry->d_inode->i_uid = current_fsuid(); + if (inode->i_mode & S_ISGID) + dentry->d_inode->i_gid = inode->i_gid; + else + dentry->d_inode->i_gid = + current_fsgid(); + } + } + } + return rc; +} + +static int +cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode, + const char *full_path, struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, const unsigned int xid) +{ + int rc = 0; + u32 oplock = 0; + FILE_UNIX_BASIC_INFO *info = NULL; + struct inode *newinode = NULL; + struct cifs_fattr fattr; + + info = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); + if (info == NULL) { + rc = -ENOMEM; + goto posix_mkdir_out; + } + + mode &= ~current_umask(); + rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, mode, + NULL /* netfid */, info, &oplock, full_path, + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); + if (rc == 
-EOPNOTSUPP) + goto posix_mkdir_out; + else if (rc) { + cFYI(1, "posix mkdir returned 0x%x", rc); + d_drop(dentry); + goto posix_mkdir_out; + } + + if (info->Type == cpu_to_le32(-1)) + /* no return info, go query for it */ + goto posix_mkdir_get_info; + /* + * BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if + * need to set uid/gid. + */ + + cifs_unix_basic_to_fattr(&fattr, info, cifs_sb); + cifs_fill_uniqueid(inode->i_sb, &fattr); + newinode = cifs_iget(inode->i_sb, &fattr); + if (!newinode) + goto posix_mkdir_get_info; + + d_instantiate(dentry, newinode); + +#ifdef CONFIG_CIFS_DEBUG2 + cFYI(1, "instantiated dentry %p %s to inode %p", dentry, + dentry->d_name.name, newinode); + + if (newinode->i_nlink != 2) + cFYI(1, "unexpected number of links %d", newinode->i_nlink); +#endif + +posix_mkdir_out: + kfree(info); + return rc; +posix_mkdir_get_info: + rc = cifs_mkdir_qinfo(inode, dentry, mode, full_path, cifs_sb, tcon, + xid); + goto posix_mkdir_out; +} + int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) { - int rc = 0, tmprc; + int rc = 0; unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; - char *full_path = NULL; - struct inode *newinode = NULL; - struct cifs_fattr fattr; + struct TCP_Server_Info *server; + char *full_path; cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode); @@ -1248,145 +1385,29 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode) if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { - u32 oplock = 0; - FILE_UNIX_BASIC_INFO *pInfo = - kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL); - if (pInfo == NULL) { - rc = -ENOMEM; + rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb, + tcon, xid); + if (rc != -EOPNOTSUPP) goto mkdir_out; - } - - mode &= ~current_umask(); - rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, - mode, NULL /* netfid */, pInfo, &oplock, - full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - if (rc == -EOPNOTSUPP) { - kfree(pInfo); - goto mkdir_retry_old; - } else if (rc) { - cFYI(1, "posix mkdir returned 0x%x", rc); - d_drop(direntry); - } else { - if (pInfo->Type == cpu_to_le32(-1)) { - /* no return info, go query for it */ - kfree(pInfo); - goto mkdir_get_info; - } -/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need - to set uid/gid */ - - cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); - cifs_fill_uniqueid(inode->i_sb, &fattr); - newinode = cifs_iget(inode->i_sb, &fattr); - if (!newinode) { - kfree(pInfo); - goto mkdir_get_info; - } - - d_instantiate(direntry, newinode); + } -#ifdef CONFIG_CIFS_DEBUG2 - cFYI(1, "instantiated dentry %p %s to inode %p", - direntry, direntry->d_name.name, newinode); + server = tcon->ses->server; - if (newinode->i_nlink != 2) - cFYI(1, "unexpected number of links %d", - newinode->i_nlink); -#endif - } - kfree(pInfo); + if (!server->ops->mkdir) { + rc = -ENOSYS; goto mkdir_out; } -mkdir_retry_old: + /* BB add setting the equivalent of mode via CreateX w/ACLs */ - rc = CIFSSMBMkDir(xid, tcon, full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + rc = server->ops->mkdir(xid, tcon, full_path, cifs_sb); if (rc) { cFYI(1, "cifs_mkdir returned 0x%x", rc); d_drop(direntry); - } else { -mkdir_get_info: - if (tcon->unix_ext) - rc = cifs_get_inode_info_unix(&newinode, full_path, - inode->i_sb, xid); - else - rc = 
cifs_get_inode_info(&newinode, full_path, NULL, - inode->i_sb, xid, NULL); - - d_instantiate(direntry, newinode); - /* setting nlink not necessary except in cases where we - * failed to get it from the server or was set bogus */ - if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2)) - set_nlink(direntry->d_inode, 2); - - mode &= ~current_umask(); - /* must turn on setgid bit if parent dir has it */ - if (inode->i_mode & S_ISGID) - mode |= S_ISGID; - - if (tcon->unix_ext) { - struct cifs_unix_set_info_args args = { - .mode = mode, - .ctime = NO_CHANGE_64, - .atime = NO_CHANGE_64, - .mtime = NO_CHANGE_64, - .device = 0, - }; - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { - args.uid = (__u64)current_fsuid(); - if (inode->i_mode & S_ISGID) - args.gid = (__u64)inode->i_gid; - else - args.gid = (__u64)current_fsgid(); - } else { - args.uid = NO_CHANGE_64; - args.gid = NO_CHANGE_64; - } - CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - } else { - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && - (mode & S_IWUGO) == 0) { - FILE_BASIC_INFO pInfo; - struct cifsInodeInfo *cifsInode; - u32 dosattrs; - - memset(&pInfo, 0, sizeof(pInfo)); - cifsInode = CIFS_I(newinode); - dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; - pInfo.Attributes = cpu_to_le32(dosattrs); - tmprc = CIFSSMBSetPathInfo(xid, tcon, - full_path, &pInfo, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - if (tmprc == 0) - cifsInode->cifsAttrs = dosattrs; - } - if (direntry->d_inode) { - if (cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_DYNPERM) - direntry->d_inode->i_mode = - (mode | S_IFDIR); - - if (cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_SET_UID) { - direntry->d_inode->i_uid = - current_fsuid(); - if (inode->i_mode & S_ISGID) - direntry->d_inode->i_gid = - inode->i_gid; - else - direntry->d_inode->i_gid = - current_fsgid(); - } - } - } + goto mkdir_out; } + + rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon, + xid); mkdir_out: /* * Force revalidate to get parent dir info when needed since cached @@ -1405,7 +1426,8 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) unsigned int xid; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; - struct cifs_tcon *pTcon; + struct cifs_tcon *tcon; + struct TCP_Server_Info *server; char *full_path = NULL; struct cifsInodeInfo *cifsInode; @@ -1425,10 +1447,16 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) rc = PTR_ERR(tlink); goto rmdir_exit; } - pTcon = tlink_tcon(tlink); + tcon = tlink_tcon(tlink); + server = tcon->ses->server; + + if (!server->ops->rmdir) { + rc = -ENOSYS; + cifs_put_tlink(tlink); + goto rmdir_exit; + } - rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb); cifs_put_tlink(tlink); if (!rc) { diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index c40356d24c5c..3129ac74b819 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -586,6 +586,27 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon) #endif } +static void +cifs_mkdir_setinfo(struct inode *inode, const char *full_path, + struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, + const unsigned int xid) +{ + FILE_BASIC_INFO info; + struct cifsInodeInfo *cifsInode; + u32 dosattrs; + int rc; + + memset(&info, 0, sizeof(info)); + cifsInode = CIFS_I(inode); + dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; + 
info.Attributes = cpu_to_le32(dosattrs); + rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); + if (rc == 0) + cifsInode->cifsAttrs = dosattrs; +} + struct smb_version_operations smb1_operations = { .send_cancel = send_nt_cancel, .compare_fids = cifs_compare_fids, @@ -620,6 +641,9 @@ struct smb_version_operations smb1_operations = { .get_srv_inum = cifs_get_srv_inum, .build_path_to_root = cifs_build_path_to_root, .echo = CIFSSMBEcho, + .mkdir = CIFSSMBMkDir, + .mkdir_setinfo = cifs_mkdir_setinfo, + .rmdir = CIFSSMBRmDir, }; struct smb_version_values smb1_values = { diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 1ba5c405315c..2aa5cb08c526 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -122,3 +122,42 @@ out: kfree(smb2_data); return rc; } + +int +smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) +{ + return smb2_open_op_close(xid, tcon, cifs_sb, name, + FILE_WRITE_ATTRIBUTES, FILE_CREATE, 0, + CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR); +} + +void +smb2_mkdir_setinfo(struct inode *inode, const char *name, + struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, + const unsigned int xid) +{ + FILE_BASIC_INFO data; + struct cifsInodeInfo *cifs_i; + u32 dosattrs; + int tmprc; + + memset(&data, 0, sizeof(data)); + cifs_i = CIFS_I(inode); + dosattrs = cifs_i->cifsAttrs | ATTR_READONLY; + data.Attributes = cpu_to_le32(dosattrs); + tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name, + FILE_WRITE_ATTRIBUTES, FILE_CREATE, 0, + CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO); + if (tmprc == 0) + cifs_i->cifsAttrs = dosattrs; +} + +int +smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, + struct cifs_sb_info *cifs_sb) +{ + return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, + 0, CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE, + NULL, SMB2_OP_DELETE); +} diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 410cf925ea26..826209bf3684 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -318,6 +318,9 @@ struct smb_version_operations smb21_operations = { .query_path_info = smb2_query_path_info, .get_srv_inum = smb2_get_srv_inum, .build_path_to_root = smb2_build_path_to_root, + .mkdir = smb2_mkdir, + .mkdir_setinfo = smb2_mkdir_setinfo, + .rmdir = smb2_rmdir, }; struct smb_version_values smb21_values = { diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 902bbe2b5ad3..bfaa7b148afd 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -52,6 +52,14 @@ extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, FILE_ALL_INFO *data, bool *adjust_tz); +extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, + const char *name, struct cifs_sb_info *cifs_sb); +extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path, + struct cifs_sb_info *cifs_sb, + struct cifs_tcon *tcon, const unsigned int xid); +extern int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, + const char *name, struct cifs_sb_info *cifs_sb); + /* * SMB2 Worker functions - most of protocol specific implementation details * are contained within these calls. 
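Taken together, the CIFS hunks above move mkdir/rmdir behind the per-dialect smb_version_operations table, with CIFSSMBMkDir/CIFSSMBRmDir wired up for SMB1 and smb2_mkdir/smb2_rmdir for SMB2.1. Abridged into a sketch (names as in the patch), the common-code dispatch amounts to:

static int example_mkdir_dispatch(const unsigned int xid,
                                  struct cifs_tcon *tcon,
                                  const char *full_path,
                                  struct cifs_sb_info *cifs_sb)
{
        struct TCP_Server_Info *server = tcon->ses->server;

        if (!server->ops->mkdir)
                return -ENOSYS;         /* dialect provides no mkdir op */

        return server->ops->mkdir(xid, tcon, full_path, cifs_sb);
}

cifs_rmdir() follows the same shape through server->ops->rmdir, and cifs_mkdir_qinfo() additionally calls server->ops->mkdir_setinfo() when read-only DOS attributes need to be applied to the new directory.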
diff --git a/fs/compat.c b/fs/compat.c index 6161255fac45..1bdb350ea5d3 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1155,11 +1155,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, struct file *file; int fput_needed; ssize_t ret; + loff_t pos; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_readv(file, vec, vlen, &file->f_pos); + pos = file->f_pos; + ret = compat_readv(file, vec, vlen, &pos); + file->f_pos = pos; fput_light(file, fput_needed); return ret; } @@ -1221,11 +1224,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, struct file *file; int fput_needed; ssize_t ret; + loff_t pos; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_writev(file, vec, vlen, &file->f_pos); + pos = file->f_pos; + ret = compat_writev(file, vec, vlen, &pos); + file->f_pos = pos; fput_light(file, fput_needed); return ret; } diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 989e034f02bd..cfb4b9fed520 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -385,8 +385,6 @@ struct ecryptfs_msg_ctx { struct mutex mux; }; -struct ecryptfs_daemon; - struct ecryptfs_daemon { #define ECRYPTFS_DAEMON_IN_READ 0x00000001 #define ECRYPTFS_DAEMON_IN_POLL 0x00000002 @@ -394,10 +392,7 @@ struct ecryptfs_daemon { #define ECRYPTFS_DAEMON_MISCDEV_OPEN 0x00000008 u32 flags; u32 num_queued_msg_ctx; - struct pid *pid; - uid_t euid; - struct user_namespace *user_ns; - struct task_struct *task; + struct file *file; struct mutex mux; struct list_head msg_ctx_out_queue; wait_queue_head_t wait; @@ -554,6 +549,8 @@ extern struct kmem_cache *ecryptfs_key_tfm_cache; struct inode *ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb); void ecryptfs_i_size_init(const char *page_virt, struct inode *inode); +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, + struct inode *ecryptfs_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t *decrypted_name_size, struct dentry *ecryptfs_dentry, @@ -607,13 +604,8 @@ int ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode); -int ecryptfs_process_helo(uid_t euid, struct user_namespace *user_ns, - struct pid *pid); -int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, - struct pid *pid); -int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, - struct user_namespace *user_ns, struct pid *pid, - u32 seq); +int ecryptfs_process_response(struct ecryptfs_daemon *daemon, + struct ecryptfs_message *msg, u32 seq); int ecryptfs_send_message(char *data, int data_len, struct ecryptfs_msg_ctx **msg_ctx); int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, @@ -658,8 +650,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, struct inode *ecryptfs_inode); struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index); int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon); -int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns); +int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon); int ecryptfs_parse_packet_length(unsigned char *data, size_t *size, size_t *length_size); int ecryptfs_write_packet_length(char *dest, size_t size, @@ -671,8 +662,7 @@ int ecryptfs_send_miscdev(char *data, size_t data_size, u16 
msg_flags, struct ecryptfs_daemon *daemon); void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx); int -ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns, struct pid *pid); +ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file); int ecryptfs_init_kthread(void); void ecryptfs_destroy_kthread(void); int ecryptfs_privileged_open(struct file **lower_file, diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 2b17f2f9b121..44ce5c6a541d 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -138,29 +138,50 @@ out: return rc; } -static void ecryptfs_vma_close(struct vm_area_struct *vma) -{ - filemap_write_and_wait(vma->vm_file->f_mapping); -} - -static const struct vm_operations_struct ecryptfs_file_vm_ops = { - .close = ecryptfs_vma_close, - .fault = filemap_fault, -}; +struct kmem_cache *ecryptfs_file_info_cache; -static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int read_or_initialize_metadata(struct dentry *dentry) { + struct inode *inode = dentry->d_inode; + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; + struct ecryptfs_crypt_stat *crypt_stat; int rc; - rc = generic_file_mmap(file, vma); + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; + mount_crypt_stat = &ecryptfs_superblock_to_private( + inode->i_sb)->mount_crypt_stat; + mutex_lock(&crypt_stat->cs_mutex); + + if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED && + crypt_stat->flags & ECRYPTFS_KEY_VALID) { + rc = 0; + goto out; + } + + rc = ecryptfs_read_metadata(dentry); if (!rc) - vma->vm_ops = &ecryptfs_file_vm_ops; + goto out; + + if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) { + crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED + | ECRYPTFS_ENCRYPTED); + rc = 0; + goto out; + } + if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) && + !i_size_read(ecryptfs_inode_to_lower(inode))) { + rc = ecryptfs_initialize_file(dentry, inode); + if (!rc) + goto out; + } + + rc = -EIO; +out: + mutex_unlock(&crypt_stat->cs_mutex); return rc; } -struct kmem_cache *ecryptfs_file_info_cache; - /** * ecryptfs_open * @inode: inode speciying file to open @@ -236,32 +257,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file) rc = 0; goto out; } - mutex_lock(&crypt_stat->cs_mutex); - if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) - || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { - rc = ecryptfs_read_metadata(ecryptfs_dentry); - if (rc) { - ecryptfs_printk(KERN_DEBUG, - "Valid headers not found\n"); - if (!(mount_crypt_stat->flags - & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { - rc = -EIO; - printk(KERN_WARNING "Either the lower file " - "is not in a valid eCryptfs format, " - "or the key could not be retrieved. 
" - "Plaintext passthrough mode is not " - "enabled; returning -EIO\n"); - mutex_unlock(&crypt_stat->cs_mutex); - goto out_put; - } - rc = 0; - crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED - | ECRYPTFS_ENCRYPTED); - mutex_unlock(&crypt_stat->cs_mutex); - goto out; - } - } - mutex_unlock(&crypt_stat->cs_mutex); + rc = read_or_initialize_metadata(ecryptfs_dentry); + if (rc) + goto out_put; ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = " "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino, (unsigned long long)i_size_read(inode)); @@ -292,15 +290,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file) static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - int rc = 0; - - rc = generic_file_fsync(file, start, end, datasync); - if (rc) - goto out; - rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end, - datasync); -out: - return rc; + return vfs_fsync(ecryptfs_file_to_lower(file), datasync); } static int ecryptfs_fasync(int fd, struct file *file, int flag) @@ -369,7 +359,7 @@ const struct file_operations ecryptfs_main_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, #endif - .mmap = ecryptfs_file_mmap, + .mmap = generic_file_mmap, .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index ffa2be57804d..534b129ea676 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -143,6 +143,31 @@ static int ecryptfs_interpose(struct dentry *lower_dentry, return 0; } +static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry, + struct inode *inode) +{ + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); + struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); + struct dentry *lower_dir_dentry; + int rc; + + dget(lower_dentry); + lower_dir_dentry = lock_parent(lower_dentry); + rc = vfs_unlink(lower_dir_inode, lower_dentry); + if (rc) { + printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); + goto out_unlock; + } + fsstack_copy_attr_times(dir, lower_dir_inode); + set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink); + inode->i_ctime = dir->i_ctime; + d_drop(dentry); +out_unlock: + unlock_dir(lower_dir_dentry); + dput(lower_dentry); + return rc; +} + /** * ecryptfs_do_create * @directory_inode: inode of the new file's dentry's parent in ecryptfs @@ -182,8 +207,10 @@ ecryptfs_do_create(struct inode *directory_inode, } inode = __ecryptfs_get_inode(lower_dentry->d_inode, directory_inode->i_sb); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + vfs_unlink(lower_dir_dentry->d_inode, lower_dentry); goto out_lock; + } fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); out_lock: @@ -200,8 +227,8 @@ out: * * Returns zero on success */ -static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, - struct inode *ecryptfs_inode) +int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, + struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; @@ -264,7 +291,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, * that this on disk file is prepared to be an ecryptfs file */ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode); if (rc) { - drop_nlink(ecryptfs_inode); + ecryptfs_do_unlink(directory_inode, ecryptfs_dentry, + ecryptfs_inode); + make_bad_inode(ecryptfs_inode); 
unlock_new_inode(ecryptfs_inode); iput(ecryptfs_inode); goto out; @@ -318,21 +347,20 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, struct vfsmount *lower_mnt; int rc = 0; - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); - fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode); - BUG_ON(!lower_dentry->d_count); - dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL); - ecryptfs_set_dentry_private(dentry, dentry_info); if (!dentry_info) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to allocate ecryptfs_dentry_info struct\n", __func__); dput(lower_dentry); - mntput(lower_mnt); - d_drop(dentry); return -ENOMEM; } + + lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); + fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode); + BUG_ON(!lower_dentry->d_count); + + ecryptfs_set_dentry_private(dentry, dentry_info); ecryptfs_set_dentry_lower(dentry, lower_dentry); ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt); @@ -381,12 +409,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, struct dentry *lower_dir_dentry, *lower_dentry; int rc = 0; - if ((ecryptfs_dentry->d_name.len == 1 - && !strcmp(ecryptfs_dentry->d_name.name, ".")) - || (ecryptfs_dentry->d_name.len == 2 - && !strcmp(ecryptfs_dentry->d_name.name, ".."))) { - goto out_d_drop; - } lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); mutex_lock(&lower_dir_dentry->d_inode->i_mutex); lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, @@ -397,8 +419,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, - encrypted_and_encoded_name); - goto out_d_drop; + ecryptfs_dentry->d_name.name); + goto out; } if (lower_dentry->d_inode) goto interpose; @@ -415,7 +437,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt and encode " "filename; rc = [%d]\n", __func__, rc); - goto out_d_drop; + goto out; } mutex_lock(&lower_dir_dentry->d_inode->i_mutex); lower_dentry = lookup_one_len(encrypted_and_encoded_name, @@ -427,14 +449,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); - goto out_d_drop; + goto out; } interpose: rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry, ecryptfs_dir_inode); - goto out; -out_d_drop: - d_drop(ecryptfs_dentry); out: kfree(encrypted_and_encoded_name); return ERR_PTR(rc); @@ -476,27 +495,7 @@ out_lock: static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry) { - int rc = 0; - struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); - struct dentry *lower_dir_dentry; - - dget(lower_dentry); - lower_dir_dentry = lock_parent(lower_dentry); - rc = vfs_unlink(lower_dir_inode, lower_dentry); - if (rc) { - printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); - goto out_unlock; - } - fsstack_copy_attr_times(dir, lower_dir_inode); - set_nlink(dentry->d_inode, - ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink); - dentry->d_inode->i_ctime = dir->i_ctime; - d_drop(dentry); -out_unlock: - unlock_dir(lower_dir_dentry); - dput(lower_dentry); - return rc; + return ecryptfs_do_unlink(dir, 
dentry, dentry->d_inode); } static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry, @@ -971,12 +970,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) goto out; } - if (S_ISREG(inode->i_mode)) { - rc = filemap_write_and_wait(inode->i_mapping); - if (rc) - goto out; - fsstack_copy_attr_all(inode, lower_inode); - } memcpy(&lower_ia, ia, sizeof(lower_ia)); if (ia->ia_valid & ATTR_FILE) lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file); diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 1c0b3b6b75c6..2768138eefee 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -279,6 +279,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; + u8 cipher_code; *check_ruid = 0; @@ -420,6 +421,18 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; + + cipher_code = ecryptfs_code_for_cipher_string( + mount_crypt_stat->global_default_cipher_name, + mount_crypt_stat->global_default_cipher_key_size); + if (!cipher_code) { + ecryptfs_printk(KERN_ERR, + "eCryptfs doesn't support cipher: %s", + mount_crypt_stat->global_default_cipher_name); + rc = -EINVAL; + goto out; + } + mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { @@ -540,6 +553,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); + + /** + * Set the POSIX ACL flag based on whether they're enabled in the lower + * mount. Force a read-only eCryptfs mount if the lower mount is ro. + * Allow a ro eCryptfs mount even when the lower mount is rw. + */ + s->s_flags = flags & ~MS_POSIXACL; + s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL); + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index a750f957b145..b29bb8bfa8d9 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -32,8 +32,8 @@ static struct mutex ecryptfs_msg_ctx_lists_mux; static struct hlist_head *ecryptfs_daemon_hash; struct mutex ecryptfs_daemon_hash_mux; static int ecryptfs_hash_bits; -#define ecryptfs_uid_hash(uid) \ - hash_long((unsigned long)uid, ecryptfs_hash_bits) +#define ecryptfs_current_euid_hash(uid) \ + hash_long((unsigned long)current_euid(), ecryptfs_hash_bits) static u32 ecryptfs_msg_counter; static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; @@ -105,26 +105,24 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) /** * ecryptfs_find_daemon_by_euid - * @euid: The effective user id which maps to the desired daemon id - * @user_ns: The namespace in which @euid applies * @daemon: If return value is zero, points to the desired daemon pointer * * Must be called with ecryptfs_daemon_hash_mux held. * - * Search the hash list for the given user id. + * Search the hash list for the current effective user id. * * Returns zero if the user id exists in the list; non-zero otherwise. 
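For orientation, the lookup now keys purely on the caller's effective uid, taken from the credentials attached to the /dev/ecryptfs file, instead of an euid/namespace/pid triple passed down by the caller. A minimal userspace sketch of a hash-bucket lookup by current euid follows; all names are hypothetical and this is not the eCryptfs code.

/* Illustration only (userspace): find a registered daemon by the
 * caller's effective uid, hashed into a small bucket array. */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

#define HASH_BITS 4
#define HASH_SIZE (1 << HASH_BITS)

struct daemon {
	uid_t euid;            /* stands in for file->f_cred->euid */
	struct daemon *next;
};

static struct daemon *daemon_hash[HASH_SIZE];

static unsigned int euid_hash(uid_t euid)
{
	return (unsigned int)euid & (HASH_SIZE - 1);
}

/* Returns the daemon registered for the current euid, or NULL. */
static struct daemon *find_daemon_by_euid(void)
{
	uid_t euid = geteuid();
	struct daemon *d;

	for (d = daemon_hash[euid_hash(euid)]; d; d = d->next)
		if (d->euid == euid)
			return d;
	return NULL;
}

int main(void)
{
	struct daemon me = { .euid = geteuid(), .next = NULL };

	daemon_hash[euid_hash(me.euid)] = &me;
	printf("found: %s\n", find_daemon_by_euid() ? "yes" : "no");
	return 0;
}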
*/ -int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns) +int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon) { struct hlist_node *elem; int rc; hlist_for_each_entry(*daemon, elem, - &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)], - euid_chain) { - if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) { + &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()], + euid_chain) { + if ((*daemon)->file->f_cred->euid == current_euid() && + (*daemon)->file->f_cred->user_ns == current_user_ns()) { rc = 0; goto out; } @@ -137,9 +135,7 @@ out: /** * ecryptfs_spawn_daemon - Create and initialize a new daemon struct * @daemon: Pointer to set to newly allocated daemon struct - * @euid: Effective user id for the daemon - * @user_ns: The namespace in which @euid applies - * @pid: Process id for the daemon + * @file: File used when opening /dev/ecryptfs * * Must be called ceremoniously while in possession of * ecryptfs_sacred_daemon_hash_mux @@ -147,8 +143,7 @@ out: * Returns zero on success; non-zero otherwise */ int -ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, - struct user_namespace *user_ns, struct pid *pid) +ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file) { int rc = 0; @@ -159,16 +154,13 @@ ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, "GFP_KERNEL memory\n", __func__, sizeof(**daemon)); goto out; } - (*daemon)->euid = euid; - (*daemon)->user_ns = get_user_ns(user_ns); - (*daemon)->pid = get_pid(pid); - (*daemon)->task = current; + (*daemon)->file = file; mutex_init(&(*daemon)->mux); INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue); init_waitqueue_head(&(*daemon)->wait); (*daemon)->num_queued_msg_ctx = 0; hlist_add_head(&(*daemon)->euid_chain, - &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]); + &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()]); out: return rc; } @@ -188,9 +180,6 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ) || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) { rc = -EBUSY; - printk(KERN_WARNING "%s: Attempt to destroy daemon with pid " - "[0x%p], but it is in the midst of a read or a poll\n", - __func__, daemon->pid); mutex_unlock(&daemon->mux); goto out; } @@ -203,12 +192,6 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) ecryptfs_msg_ctx_alloc_to_free(msg_ctx); } hlist_del(&daemon->euid_chain); - if (daemon->task) - wake_up_process(daemon->task); - if (daemon->pid) - put_pid(daemon->pid); - if (daemon->user_ns) - put_user_ns(daemon->user_ns); mutex_unlock(&daemon->mux); kzfree(daemon); out: @@ -216,42 +199,9 @@ out: } /** - * ecryptfs_process_quit - * @euid: The user ID owner of the message - * @user_ns: The namespace in which @euid applies - * @pid: The process ID for the userspace program that sent the - * message - * - * Deletes the corresponding daemon for the given euid and pid, if - * it is the registered that is requesting the deletion. Returns zero - * after deleting the desired daemon; non-zero otherwise. 
- */ -int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, - struct pid *pid) -{ - struct ecryptfs_daemon *daemon; - int rc; - - mutex_lock(&ecryptfs_daemon_hash_mux); - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns); - if (rc || !daemon) { - rc = -EINVAL; - printk(KERN_ERR "Received request from user [%d] to " - "unregister unrecognized daemon [0x%p]\n", euid, pid); - goto out_unlock; - } - rc = ecryptfs_exorcise_daemon(daemon); -out_unlock: - mutex_unlock(&ecryptfs_daemon_hash_mux); - return rc; -} - -/** * ecryptfs_process_reponse * @msg: The ecryptfs message received; the caller should sanity check * msg->data_len and free the memory - * @pid: The process ID of the userspace application that sent the - * message * @seq: The sequence number of the message; must match the sequence * number for the existing message context waiting for this * response @@ -270,16 +220,11 @@ out_unlock: * * Returns zero on success; non-zero otherwise */ -int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, - struct user_namespace *user_ns, struct pid *pid, - u32 seq) +int ecryptfs_process_response(struct ecryptfs_daemon *daemon, + struct ecryptfs_message *msg, u32 seq) { - struct ecryptfs_daemon *uninitialized_var(daemon); struct ecryptfs_msg_ctx *msg_ctx; size_t msg_size; - struct nsproxy *nsproxy; - struct user_namespace *tsk_user_ns; - uid_t ctx_euid; int rc; if (msg->index >= ecryptfs_message_buf_len) { @@ -292,51 +237,6 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, } msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; mutex_lock(&msg_ctx->mux); - mutex_lock(&ecryptfs_daemon_hash_mux); - rcu_read_lock(); - nsproxy = task_nsproxy(msg_ctx->task); - if (nsproxy == NULL) { - rc = -EBADMSG; - printk(KERN_ERR "%s: Receiving process is a zombie. 
Dropping " - "message.\n", __func__); - rcu_read_unlock(); - mutex_unlock(&ecryptfs_daemon_hash_mux); - goto wake_up; - } - tsk_user_ns = __task_cred(msg_ctx->task)->user_ns; - ctx_euid = task_euid(msg_ctx->task); - rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, tsk_user_ns); - rcu_read_unlock(); - mutex_unlock(&ecryptfs_daemon_hash_mux); - if (rc) { - rc = -EBADMSG; - printk(KERN_WARNING "%s: User [%d] received a " - "message response from process [0x%p] but does " - "not have a registered daemon\n", __func__, - ctx_euid, pid); - goto wake_up; - } - if (ctx_euid != euid) { - rc = -EBADMSG; - printk(KERN_WARNING "%s: Received message from user " - "[%d]; expected message from user [%d]\n", __func__, - euid, ctx_euid); - goto unlock; - } - if (tsk_user_ns != user_ns) { - rc = -EBADMSG; - printk(KERN_WARNING "%s: Received message from user_ns " - "[0x%p]; expected message from user_ns [0x%p]\n", - __func__, user_ns, tsk_user_ns); - goto unlock; - } - if (daemon->pid != pid) { - rc = -EBADMSG; - printk(KERN_ERR "%s: User [%d] sent a message response " - "from an unrecognized process [0x%p]\n", - __func__, ctx_euid, pid); - goto unlock; - } if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) { rc = -EINVAL; printk(KERN_WARNING "%s: Desired context element is not " @@ -359,9 +259,8 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, } memcpy(msg_ctx->msg, msg, msg_size); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE; - rc = 0; -wake_up: wake_up_process(msg_ctx->task); + rc = 0; unlock: mutex_unlock(&msg_ctx->mux); out: @@ -383,14 +282,11 @@ ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) { struct ecryptfs_daemon *daemon; - uid_t euid = current_euid(); int rc; - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); + rc = ecryptfs_find_daemon_by_euid(&daemon); if (rc || !daemon) { rc = -ENOTCONN; - printk(KERN_ERR "%s: User [%d] does not have a daemon " - "registered\n", __func__, euid); goto out; } mutex_lock(&ecryptfs_msg_ctx_lists_mux); diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index c0038f6566d4..412e6eda25f8 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -33,7 +33,7 @@ static atomic_t ecryptfs_num_miscdev_opens; /** * ecryptfs_miscdev_poll - * @file: dev file (ignored) + * @file: dev file * @pt: dev poll table (ignored) * * Returns the poll mask @@ -41,20 +41,10 @@ static atomic_t ecryptfs_num_miscdev_opens; static unsigned int ecryptfs_miscdev_poll(struct file *file, poll_table *pt) { - struct ecryptfs_daemon *daemon; + struct ecryptfs_daemon *daemon = file->private_data; unsigned int mask = 0; - uid_t euid = current_euid(); - int rc; - mutex_lock(&ecryptfs_daemon_hash_mux); - /* TODO: Just use file->private_data? 
*/ - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) { - mutex_unlock(&ecryptfs_daemon_hash_mux); - return -EINVAL; - } mutex_lock(&daemon->mux); - mutex_unlock(&ecryptfs_daemon_hash_mux); if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { printk(KERN_WARNING "%s: Attempt to poll on zombified " "daemon\n", __func__); @@ -79,7 +69,7 @@ out_unlock_daemon: /** * ecryptfs_miscdev_open * @inode: inode of miscdev handle (ignored) - * @file: file for miscdev handle (ignored) + * @file: file for miscdev handle * * Returns zero on success; non-zero otherwise */ @@ -87,7 +77,6 @@ static int ecryptfs_miscdev_open(struct inode *inode, struct file *file) { struct ecryptfs_daemon *daemon = NULL; - uid_t euid = current_euid(); int rc; mutex_lock(&ecryptfs_daemon_hash_mux); @@ -98,30 +87,20 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) "count; rc = [%d]\n", __func__, rc); goto out_unlock_daemon_list; } - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) { - rc = ecryptfs_spawn_daemon(&daemon, euid, current_user_ns(), - task_pid(current)); - if (rc) { - printk(KERN_ERR "%s: Error attempting to spawn daemon; " - "rc = [%d]\n", __func__, rc); - goto out_module_put_unlock_daemon_list; - } - } - mutex_lock(&daemon->mux); - if (daemon->pid != task_pid(current)) { + rc = ecryptfs_find_daemon_by_euid(&daemon); + if (!rc) { rc = -EINVAL; - printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], " - "but pid [0x%p] has attempted to open the handle " - "instead\n", __func__, daemon->pid, daemon->euid, - task_pid(current)); - goto out_unlock_daemon; + goto out_unlock_daemon_list; + } + rc = ecryptfs_spawn_daemon(&daemon, file); + if (rc) { + printk(KERN_ERR "%s: Error attempting to spawn daemon; " + "rc = [%d]\n", __func__, rc); + goto out_module_put_unlock_daemon_list; } + mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { rc = -EBUSY; - printk(KERN_ERR "%s: Miscellaneous device handle may only be " - "opened once per daemon; pid [0x%p] already has this " - "handle open\n", __func__, daemon->pid); goto out_unlock_daemon; } daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN; @@ -140,7 +119,7 @@ out_unlock_daemon_list: /** * ecryptfs_miscdev_release * @inode: inode of fs/ecryptfs/euid handle (ignored) - * @file: file for fs/ecryptfs/euid handle (ignored) + * @file: file for fs/ecryptfs/euid handle * * This keeps the daemon registered until the daemon sends another * ioctl to fs/ecryptfs/ctl or until the kernel module unregisters. @@ -150,20 +129,18 @@ out_unlock_daemon_list: static int ecryptfs_miscdev_release(struct inode *inode, struct file *file) { - struct ecryptfs_daemon *daemon = NULL; - uid_t euid = current_euid(); + struct ecryptfs_daemon *daemon = file->private_data; int rc; - mutex_lock(&ecryptfs_daemon_hash_mux); - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) - daemon = file->private_data; mutex_lock(&daemon->mux); BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN)); daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN; atomic_dec(&ecryptfs_num_miscdev_opens); mutex_unlock(&daemon->mux); + + mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_exorcise_daemon(daemon); + mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { printk(KERN_CRIT "%s: Fatal error whilst attempting to " "shut down daemon; rc = [%d]. 
Please report this " @@ -171,7 +148,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) BUG(); } module_put(THIS_MODULE); - mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } @@ -248,7 +224,7 @@ int ecryptfs_send_miscdev(char *data, size_t data_size, /** * ecryptfs_miscdev_read - format and send message from queue - * @file: fs/ecryptfs/euid miscdevfs handle (ignored) + * @file: miscdevfs handle * @buf: User buffer into which to copy the next message on the daemon queue * @count: Amount of space available in @buf * @ppos: Offset in file (ignored) @@ -262,43 +238,27 @@ static ssize_t ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct ecryptfs_daemon *daemon; + struct ecryptfs_daemon *daemon = file->private_data; struct ecryptfs_msg_ctx *msg_ctx; size_t packet_length_size; char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE]; size_t i; size_t total_length; - uid_t euid = current_euid(); int rc; - mutex_lock(&ecryptfs_daemon_hash_mux); - /* TODO: Just use file->private_data? */ - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); - if (rc || !daemon) { - mutex_unlock(&ecryptfs_daemon_hash_mux); - return -EINVAL; - } mutex_lock(&daemon->mux); - if (task_pid(current) != daemon->pid) { - mutex_unlock(&daemon->mux); - mutex_unlock(&ecryptfs_daemon_hash_mux); - return -EPERM; - } if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { rc = 0; - mutex_unlock(&ecryptfs_daemon_hash_mux); printk(KERN_WARNING "%s: Attempt to read from zombified " "daemon\n", __func__); goto out_unlock_daemon; } if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) { rc = 0; - mutex_unlock(&ecryptfs_daemon_hash_mux); goto out_unlock_daemon; } /* This daemon will not go away so long as this flag is set */ daemon->flags |= ECRYPTFS_DAEMON_IN_READ; - mutex_unlock(&ecryptfs_daemon_hash_mux); check_list: if (list_empty(&daemon->msg_ctx_out_queue)) { mutex_unlock(&daemon->mux); @@ -382,16 +342,12 @@ out_unlock_daemon: * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon * @data: Bytes comprising struct ecryptfs_message * @data_size: sizeof(struct ecryptfs_message) + data len - * @euid: Effective user id of miscdevess sending the miscdev response - * @user_ns: The namespace in which @euid applies - * @pid: Miscdevess id of miscdevess sending the miscdev response * @seq: Sequence number for miscdev response packet * * Returns zero on success; non-zero otherwise */ -static int ecryptfs_miscdev_response(char *data, size_t data_size, - uid_t euid, struct user_namespace *user_ns, - struct pid *pid, u32 seq) +static int ecryptfs_miscdev_response(struct ecryptfs_daemon *daemon, char *data, + size_t data_size, u32 seq) { struct ecryptfs_message *msg = (struct ecryptfs_message *)data; int rc; @@ -403,7 +359,7 @@ static int ecryptfs_miscdev_response(char *data, size_t data_size, rc = -EINVAL; goto out; } - rc = ecryptfs_process_response(msg, euid, user_ns, pid, seq); + rc = ecryptfs_process_response(daemon, msg, seq); if (rc) printk(KERN_ERR "Error processing response message; rc = [%d]\n", rc); @@ -413,7 +369,7 @@ out: /** * ecryptfs_miscdev_write - handle write to daemon miscdev handle - * @file: File for misc dev handle (ignored) + * @file: File for misc dev handle * @buf: Buffer containing user data * @count: Amount of data in @buf * @ppos: Pointer to offset in file (ignored) @@ -428,7 +384,6 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, u32 seq; size_t packet_size, packet_size_length; char *data; - uid_t euid = 
current_euid(); unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE]; ssize_t rc; @@ -488,10 +443,9 @@ memdup: } memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE); seq = be32_to_cpu(counter_nbo); - rc = ecryptfs_miscdev_response( + rc = ecryptfs_miscdev_response(file->private_data, &data[PKT_LEN_OFFSET + packet_size_length], - packet_size, euid, current_user_ns(), - task_pid(current), seq); + packet_size, seq); if (rc) { printk(KERN_WARNING "%s: Failed to deliver miscdev " "response to requesting operation; rc = [%zd]\n", diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index a46b3a8fee1e..bd1d57f98f74 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) { int rc; - /* - * Refuse to write the page out if we are called from reclaim context - * since our writepage() path may potentially allocate memory when - * calling into the lower fs vfs_write() which may in turn invoke - * us again. - */ - if (current->flags & PF_MEMALLOC) { - redirty_page_for_writepage(wbc, page); - rc = 0; - goto out; - } - rc = ecryptfs_encrypt_page(page); if (rc) { ecryptfs_printk(KERN_WARNING, "Error encrypting " @@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file, struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; int rc; - int need_unlock_page = 1; ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); @@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file, "zeros in page with index = [0x%.16lx]\n", index); goto out; } - set_page_dirty(page); - unlock_page(page); - need_unlock_page = 0; + rc = ecryptfs_encrypt_page(page); + if (rc) { + ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " + "index [0x%.16lx])\n", index); + goto out; + } if (pos + copied > i_size_read(ecryptfs_inode)) { i_size_write(ecryptfs_inode, pos + copied); ecryptfs_printk(KERN_DEBUG, "Expanded file size to " "[0x%.16llx]\n", (unsigned long long)i_size_read(ecryptfs_inode)); - balance_dirty_pages_ratelimited(mapping); - rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); - if (rc) { - printk(KERN_ERR "Error writing inode size to metadata; " - "rc = [%d]\n", rc); - goto out; - } } - rc = copied; + rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); + if (rc) + printk(KERN_ERR "Error writing inode size to metadata; " + "rc = [%d]\n", rc); + else + rc = copied; out: - if (need_unlock_page) - unlock_page(page); + unlock_page(page); page_cache_release(page); return rc; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 1c8b55670804..eedec84c1809 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) error = PTR_ERR(file); goto out_free_fd; } - fd_install(fd, file); ep->file = file; + fd_install(fd, file); return fd; out_free_fd: diff --git a/fs/exec.c b/fs/exec.c index 3684353ebd5f..574cf4de4ec3 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -2069,25 +2069,18 @@ static void wait_for_dump_helpers(struct file *file) */ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) { - struct file *rp, *wp; + struct file *files[2]; struct fdtable *fdt; struct coredump_params *cp = (struct coredump_params *)info->data; struct files_struct *cf = current->files; + int err = create_pipe_files(files, 0); + if (err) + return err; - wp = create_write_pipe(0); - if (IS_ERR(wp)) - return PTR_ERR(wp); - - rp = 
create_read_pipe(wp, 0); - if (IS_ERR(rp)) { - free_write_pipe(wp); - return PTR_ERR(rp); - } - - cp->file = wp; + cp->file = files[1]; sys_close(0); - fd_install(0, rp); + fd_install(0, files[0]); spin_lock(&cf->file_lock); fdt = files_fdtable(cf); __set_open_fd(0, fdt); diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 5badb0c039de..1562c27a2fab 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -37,15 +37,12 @@ #define EXOFS_DBGMSG2(M...) do {} while (0) -enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), }; - unsigned exofs_max_io_pages(struct ore_layout *layout, unsigned expected_pages) { - unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC); + unsigned pages = min_t(unsigned, expected_pages, + layout->max_io_length / PAGE_SIZE); - /* TODO: easily support bio chaining */ - pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE); return pages; } @@ -101,7 +98,8 @@ static void _pcol_reset(struct page_collect *pcol) * it might not end here. don't be left with nothing */ if (!pcol->expected_pages) - pcol->expected_pages = MAX_PAGES_KMALLOC; + pcol->expected_pages = + exofs_max_io_pages(&pcol->sbi->layout, ~0); } static int pcol_try_alloc(struct page_collect *pcol) @@ -389,6 +387,8 @@ static int readpage_strip(void *data, struct page *page) size_t len; int ret; + BUG_ON(!PageLocked(page)); + /* FIXME: Just for debugging, will be removed */ if (PageUptodate(page)) EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino, @@ -572,8 +572,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate) if (!pcol->that_locked_page || (pcol->that_locked_page->index != index)) { - struct page *page = find_get_page(pcol->inode->i_mapping, index); + struct page *page; + loff_t i_size = i_size_read(pcol->inode); + + if (offset >= i_size) { + *uptodate = true; + EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index); + return ZERO_PAGE(0); + } + page = find_get_page(pcol->inode->i_mapping, index); if (!page) { page = find_or_create_page(pcol->inode->i_mapping, index, GFP_NOFS); @@ -602,12 +610,13 @@ static void __r4w_put_page(void *priv, struct page *page) { struct page_collect *pcol = priv; - if (pcol->that_locked_page != page) { + if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) { EXOFS_DBGMSG("index=0x%lx\n", page->index); page_cache_release(page); return; } - EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index); + EXOFS_DBGMSG("that_locked_page index=0x%lx\n", + ZERO_PAGE(0) == page ? 
-1 : page->index); } static const struct _ore_r4w_op _r4w_op = { diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c index 24a49d47e935..1585db1aa365 100644 --- a/fs/exofs/ore.c +++ b/fs/exofs/ore.c @@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp) bio->bi_rw |= REQ_WRITE; } - osd_req_write(or, _ios_obj(ios, dev), per_dev->offset, - bio, per_dev->length); + osd_req_write(or, _ios_obj(ios, cur_comp), + per_dev->offset, bio, per_dev->length); ORE_DBGMSG("write(0x%llx) offset=0x%llx " "length=0x%llx dev=%d\n", - _LLU(_ios_obj(ios, dev)->id), + _LLU(_ios_obj(ios, cur_comp)->id), _LLU(per_dev->offset), _LLU(per_dev->length), dev); } else if (ios->kern_buff) { @@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp) (ios->si.unit_off + ios->length > ios->layout->stripe_unit)); - ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev), + ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp), per_dev->offset, ios->kern_buff, ios->length); if (unlikely(ret)) goto out; ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx " "length=0x%llx dev=%d\n", - _LLU(_ios_obj(ios, dev)->id), + _LLU(_ios_obj(ios, cur_comp)->id), _LLU(per_dev->offset), _LLU(ios->length), per_dev->dev); } else { - osd_req_set_attributes(or, _ios_obj(ios, dev)); + osd_req_set_attributes(or, _ios_obj(ios, cur_comp)); ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n", - _LLU(_ios_obj(ios, dev)->id), + _LLU(_ios_obj(ios, cur_comp)->id), ios->out_attr_len, dev); } diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 433783624d10..dde41a75c7c8 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -400,8 +400,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait) ret = ore_write(ios); if (unlikely(ret)) EXOFS_ERR("%s: ore_write failed.\n", __func__); - else - sb->s_dirt = 0; unlock_super(sb); @@ -412,14 +410,6 @@ out: return ret; } -static void exofs_write_super(struct super_block *sb) -{ - if (!(sb->s_flags & MS_RDONLY)) - exofs_sync_fs(sb, 1); - else - sb->s_dirt = 0; -} - static void _exofs_print_device(const char *msg, const char *dev_path, struct osd_dev *od, u64 pid) { @@ -952,7 +942,6 @@ static const struct super_operations exofs_sops = { .write_inode = exofs_write_inode, .evict_inode = exofs_evict_inode, .put_super = exofs_put_super, - .write_super = exofs_write_super, .sync_fs = exofs_sync_fs, .statfs = exofs_statfs, }; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 264d315f6c47..6363ac66fafa 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -79,6 +79,7 @@ void ext2_evict_inode(struct inode * inode) truncate_inode_pages(&inode->i_data, 0); if (want_delete) { + sb_start_intwrite(inode->i_sb); /* set dtime */ EXT2_I(inode)->i_dtime = get_seconds(); mark_inode_dirty(inode); @@ -98,8 +99,10 @@ void ext2_evict_inode(struct inode * inode) if (unlikely(rsv)) kfree(rsv); - if (want_delete) + if (want_delete) { ext2_free_inode(inode); + sb_end_intwrite(inode->i_sb); + } } typedef struct { diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9f311d27b16f..af74d9e27b71 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -42,6 +42,8 @@ static void ext2_sync_super(struct super_block *sb, static int ext2_remount (struct super_block * sb, int * flags, char * data); static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext2_sync_fs(struct super_block *sb, int wait); +static int ext2_freeze(struct super_block *sb); +static int ext2_unfreeze(struct super_block *sb); void ext2_error(struct super_block *sb, const char *function, const char 
*fmt, ...) @@ -305,6 +307,8 @@ static const struct super_operations ext2_sops = { .evict_inode = ext2_evict_inode, .put_super = ext2_put_super, .sync_fs = ext2_sync_fs, + .freeze_fs = ext2_freeze, + .unfreeze_fs = ext2_unfreeze, .statfs = ext2_statfs, .remount_fs = ext2_remount, .show_options = ext2_show_options, @@ -1200,6 +1204,35 @@ static int ext2_sync_fs(struct super_block *sb, int wait) return 0; } +static int ext2_freeze(struct super_block *sb) +{ + struct ext2_sb_info *sbi = EXT2_SB(sb); + + /* + * Open but unlinked files present? Keep EXT2_VALID_FS flag cleared + * because we have unattached inodes and thus filesystem is not fully + * consistent. + */ + if (atomic_long_read(&sb->s_remove_count)) { + ext2_sync_fs(sb, 1); + return 0; + } + /* Set EXT2_FS_VALID flag */ + spin_lock(&sbi->s_lock); + sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state); + spin_unlock(&sbi->s_lock); + ext2_sync_super(sb, sbi->s_es, 1); + + return 0; +} + +static int ext2_unfreeze(struct super_block *sb) +{ + /* Just write sb to clear EXT2_VALID_FS flag */ + ext2_write_super(sb); + + return 0; +} void ext2_write_super(struct super_block *sb) { diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 9a4a5c48b1c9..a07597307fd1 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -3459,14 +3459,6 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode, * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. - * - * Is this efficient/effective? Well, we're being nice to the system - * by cleaning up our inodes proactively so they can be reaped - * without I/O. But we are potentially leaving up to five seconds' - * worth of inodes floating about which prune_icache wants us to - * write out. One way to fix that would be to get prune_icache() - * to do a write_super() to free up some memory. It has the desired - * effect. */ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) { diff --git a/fs/ext3/super.c b/fs/ext3/super.c index ff9bcdc5b0d5..8c892e93d8e7 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -64,11 +64,6 @@ static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. - * - * The only special thing we need to do here is to make sure that all - * journal_end calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. */ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { @@ -90,12 +85,6 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) return journal_start(journal, nblocks); } -/* - * The only special thing we need to do here is to make sure that all - * journal_stop calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. - */ int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index d23b31ca9d7a..1b5089067d01 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -280,14 +280,18 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, return desc; } -static int ext4_valid_block_bitmap(struct super_block *sb, - struct ext4_group_desc *desc, - unsigned int block_group, - struct buffer_head *bh) +/* + * Return the block number which was discovered to be invalid, or 0 if + * the block bitmap is valid. 
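The rework that follows changes the validator's contract: instead of returning a boolean and printing the error itself while holding the group lock, it returns the offending block number (0 meaning the bitmap is valid) so ext4_validate_block_bitmap() can drop the lock before calling ext4_error(). A standalone userspace sketch of that "return the bad block or 0" shape, with made-up values:

/* Illustration only (userspace): report which metadata block is not
 * marked in the bitmap, or 0 if everything checks out. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t fsblk_t;

/* Returns the first metadata block whose bit is not set, or 0. */
static fsblk_t valid_block_bitmap(const unsigned char *bitmap,
				  const fsblk_t *meta_blocks, int n,
				  fsblk_t group_first_block)
{
	int i;

	for (i = 0; i < n; i++) {
		fsblk_t off = meta_blocks[i] - group_first_block;

		if (!(bitmap[off / 8] & (1 << (off % 8))))
			return meta_blocks[i];
	}
	return 0;
}

int main(void)
{
	unsigned char bitmap[8] = { 0x03 };      /* only blocks 0 and 1 marked */
	fsblk_t meta[] = { 1000, 1001, 1002 };   /* bitmap, inode bitmap, inode table */
	fsblk_t bad = valid_block_bitmap(bitmap, meta, 3, 1000);

	if (bad)
		printf("bg 0: block %llu: invalid block bitmap\n",
		       (unsigned long long)bad);
	return 0;
}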
+ */ +static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, + struct ext4_group_desc *desc, + unsigned int block_group, + struct buffer_head *bh) { ext4_grpblk_t offset; ext4_grpblk_t next_zero_bit; - ext4_fsblk_t bitmap_blk; + ext4_fsblk_t blk; ext4_fsblk_t group_first_block; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { @@ -297,37 +301,33 @@ static int ext4_valid_block_bitmap(struct super_block *sb, * or it has to also read the block group where the bitmaps * are located to verify they are set. */ - return 1; + return 0; } group_first_block = ext4_group_first_block_no(sb, block_group); /* check whether block bitmap block number is set */ - bitmap_blk = ext4_block_bitmap(sb, desc); - offset = bitmap_blk - group_first_block; + blk = ext4_block_bitmap(sb, desc); + offset = blk - group_first_block; if (!ext4_test_bit(offset, bh->b_data)) /* bad block bitmap */ - goto err_out; + return blk; /* check whether the inode bitmap block number is set */ - bitmap_blk = ext4_inode_bitmap(sb, desc); - offset = bitmap_blk - group_first_block; + blk = ext4_inode_bitmap(sb, desc); + offset = blk - group_first_block; if (!ext4_test_bit(offset, bh->b_data)) /* bad block bitmap */ - goto err_out; + return blk; /* check whether the inode table block number is set */ - bitmap_blk = ext4_inode_table(sb, desc); - offset = bitmap_blk - group_first_block; + blk = ext4_inode_table(sb, desc); + offset = blk - group_first_block; next_zero_bit = ext4_find_next_zero_bit(bh->b_data, offset + EXT4_SB(sb)->s_itb_per_group, offset); - if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group) - /* good bitmap for inode tables */ - return 1; - -err_out: - ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu", - block_group, bitmap_blk); + if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group) + /* bad bitmap for inode tables */ + return blk; return 0; } @@ -336,14 +336,26 @@ void ext4_validate_block_bitmap(struct super_block *sb, unsigned int block_group, struct buffer_head *bh) { + ext4_fsblk_t blk; + if (buffer_verified(bh)) return; ext4_lock_group(sb, block_group); - if (ext4_valid_block_bitmap(sb, desc, block_group, bh) && - ext4_block_bitmap_csum_verify(sb, block_group, desc, bh, - EXT4_BLOCKS_PER_GROUP(sb) / 8)) - set_buffer_verified(bh); + blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); + if (unlikely(blk != 0)) { + ext4_unlock_group(sb, block_group); + ext4_error(sb, "bg %u: block %llu: invalid block bitmap", + block_group, blk); + return; + } + if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, + desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) { + ext4_unlock_group(sb, block_group); + ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); + return; + } + set_buffer_verified(bh); ext4_unlock_group(sb, block_group); } diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c index f8716eab9995..5c2d1813ebe9 100644 --- a/fs/ext4/bitmap.c +++ b/fs/ext4/bitmap.c @@ -79,7 +79,6 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, if (provided == calculated) return 1; - ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group); return 0; } diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index cd0c7ed06772..aabbb3f53683 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2662,6 +2662,7 @@ cont: } path[0].p_depth = depth; path[0].p_hdr = ext_inode_hdr(inode); + i = 0; if (ext4_ext_check(inode, path[0].p_hdr, depth)) { err = -EIO; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 
89b59cb7f9b8..dff171c3a123 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -233,6 +233,11 @@ void ext4_evict_inode(struct inode *inode) if (is_bad_inode(inode)) goto no_delete; + /* + * Protect us against freezing - iput() caller didn't have to have any + * protection against it + */ + sb_start_intwrite(inode->i_sb); handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); @@ -242,6 +247,7 @@ void ext4_evict_inode(struct inode *inode) * cleaned up. */ ext4_orphan_del(NULL, inode); + sb_end_intwrite(inode->i_sb); goto no_delete; } @@ -273,6 +279,7 @@ void ext4_evict_inode(struct inode *inode) stop_handle: ext4_journal_stop(handle); ext4_orphan_del(NULL, inode); + sb_end_intwrite(inode->i_sb); goto no_delete; } } @@ -301,6 +308,7 @@ void ext4_evict_inode(struct inode *inode) else ext4_free_inode(handle, inode); ext4_journal_stop(handle); + sb_end_intwrite(inode->i_sb); return; no_delete: ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ @@ -1962,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); * This function can get called via... * - ext4_da_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) - * - shrink_page_list via pdflush (no journal handle) + * - shrink_page_list via the kswapd/direct reclaim (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) * * We don't do any block allocation in this function. If we have page with @@ -4581,14 +4589,6 @@ static int ext4_expand_extra_isize(struct inode *inode, * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. - * - * Is this efficient/effective? Well, we're being nice to the system - * by cleaning up our inodes proactively so they can be reaped - * without I/O. But we are potentially leaving up to five seconds' - * worth of inodes floating about which prune_icache wants us to - * write out. One way to fix that would be to get prune_icache() - * to do a write_super() to free up some memory. It has the desired - * effect. */ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) { @@ -4779,11 +4779,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) get_block_t *get_block; int retries = 0; - /* - * This check is racy but catches the common case. We rely on - * __block_page_mkwrite() to do a reliable check. - */ - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_pagefault(inode->i_sb); /* Delalloc case is easy... */ if (test_opt(inode->i_sb, DELALLOC) && !ext4_should_journal_data(inode) && @@ -4851,5 +4847,6 @@ retry_alloc: out_ret: ret = block_page_mkwrite_return(ret); out: + sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index f99a1311e847..fe7c63f4717e 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -44,6 +44,11 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) { struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data); + /* + * We protect against freezing so that we don't create dirty buffers + * on frozen filesystem. 
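This is one instance of the sb_start_write()/sb_end_write() bracketing used throughout this series (also visible in ext2_evict_inode(), ext4_evict_inode(), ext4_run_li_request() and fuse_file_aio_write()): any section that may dirty the filesystem is counted, so a freeze request can wait for such sections to drain and then block new ones. A deliberately simplified userspace model of the counter, not the VFS implementation:

/* Illustration only: a frozen flag plus a count of active write
 * sections. Real code sleeps on the counters; the sketch just refuses. */
#include <assert.h>
#include <stdio.h>

static int frozen;
static int writers;          /* active write sections, cf. sb->s_writers */

static void start_write(void) { assert(!frozen); writers++; }
static void end_write(void)   { writers--; }

static int freeze(void)
{
	if (writers)
		return -1;    /* writers still in flight */
	frozen = 1;
	return 0;
}

static void write_mmp_block_like(void)
{
	start_write();        /* don't create dirty buffers on a frozen fs */
	printf("dirtying and submitting a buffer\n");
	end_write();
}

int main(void)
{
	write_mmp_block_like();
	printf("freeze: %d\n", freeze());   /* 0: no writers in flight */
	return 0;
}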
+ */ + sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); mark_buffer_dirty(bh); lock_buffer(bh); @@ -51,6 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) get_bh(bh); submit_bh(WRITE_SYNC, bh); wait_on_buffer(bh); + sb_end_write(sb); if (unlikely(!buffer_uptodate(bh))) return 1; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2d51cd9af225..c6e0cb3d1f4a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -326,38 +326,17 @@ static void ext4_put_nojournal(handle_t *handle) /* * Wrappers for jbd2_journal_start/end. - * - * The only special thing we need to do here is to make sure that all - * journal_end calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. - * - * To avoid j_barrier hold in userspace when a user calls freeze(), - * ext4 prevents a new handle from being started by s_frozen, which - * is in an upper layer. */ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; - handle_t *handle; trace_ext4_journal_start(sb, nblocks, _RET_IP_); if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); + WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); journal = EXT4_SB(sb)->s_journal; - handle = ext4_journal_current_handle(); - - /* - * If a handle has been started, it should be allowed to - * finish, otherwise deadlock could happen between freeze - * and others(e.g. truncate) due to the restart of the - * journal handle if the filesystem is forzen and active - * handles are not stopped. - */ - if (!handle) - vfs_check_frozen(sb, SB_FREEZE_TRANS); - if (!journal) return ext4_get_nojournal(); /* @@ -372,12 +351,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) return jbd2_journal_start(journal, nblocks); } -/* - * The only special thing we need to do here is to make sure that all - * jbd2_journal_stop calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. 
- */ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) { struct super_block *sb; @@ -975,6 +948,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; + ei->i_da_metadata_calc_last_lblock = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; @@ -2747,6 +2721,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr) sb = elr->lr_super; ngroups = EXT4_SB(sb)->s_groups_count; + sb_start_write(sb); for (group = elr->lr_next_group; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { @@ -2773,6 +2748,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr) elr->lr_next_sched = jiffies + elr->lr_timeout; elr->lr_next_group = group + 1; } + sb_end_write(sb); return ret; } @@ -3133,6 +3109,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, ext4_group_t i, ngroups = ext4_get_groups_count(sb); int s, j, count = 0; + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) + return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + + sbi->s_itb_per_group + 2); + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + (grp * EXT4_BLOCKS_PER_GROUP(sb)); last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; @@ -4444,6 +4424,7 @@ static void ext4_clear_journal_err(struct super_block *sb, ext4_commit_super(sb, 1); jbd2_journal_clear_err(journal); + jbd2_journal_update_sb_errno(journal); } } @@ -4460,10 +4441,8 @@ int ext4_force_commit(struct super_block *sb) return 0; journal = EXT4_SB(sb)->s_journal; - if (journal) { - vfs_check_frozen(sb, SB_FREEZE_TRANS); + if (journal) ret = ext4_journal_force_commit(journal); - } return ret; } @@ -4493,9 +4472,8 @@ static int ext4_sync_fs(struct super_block *sb, int wait) * gives us a chance to flush the journal completely and mark the fs clean. * * Note that only this function cannot bring a filesystem to be in a clean - * state independently, because ext4 prevents a new handle from being started - * by @sb->s_frozen, which stays in an upper layer. It thus needs help from - * the upper layer. + * state independently. It relies on upper layer to stop all data & metadata + * modifications. 
*/ static int ext4_freeze(struct super_block *sb) { @@ -4522,7 +4500,7 @@ static int ext4_freeze(struct super_block *sb) EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: - /* we rely on s_frozen to stop further updates */ + /* we rely on upper layer to stop further updates */ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); return error; } diff --git a/fs/fat/file.c b/fs/fat/file.c index a71fe3715ee8..e007b8bd8e5e 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -43,10 +43,10 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) if (err) goto out; - mutex_lock(&inode->i_mutex); err = mnt_want_write_file(file); if (err) - goto out_unlock_inode; + goto out; + mutex_lock(&inode->i_mutex); /* * ATTR_VOLUME and ATTR_DIR cannot be changed; this also @@ -73,14 +73,14 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) /* The root directory has no attributes */ if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) { err = -EINVAL; - goto out_drop_write; + goto out_unlock_inode; } if (sbi->options.sys_immutable && ((attr | oldattr) & ATTR_SYS) && !capable(CAP_LINUX_IMMUTABLE)) { err = -EPERM; - goto out_drop_write; + goto out_unlock_inode; } /* @@ -90,12 +90,12 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) */ err = security_inode_setattr(file->f_path.dentry, &ia); if (err) - goto out_drop_write; + goto out_unlock_inode; /* This MUST be done before doing anything irreversible... */ err = fat_setattr(file->f_path.dentry, &ia); if (err) - goto out_drop_write; + goto out_unlock_inode; fsnotify_change(file->f_path.dentry, ia.ia_valid); if (sbi->options.sys_immutable) { @@ -107,10 +107,9 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) fat_save_attrs(inode, attr); mark_inode_dirty(inode); -out_drop_write: - mnt_drop_write_file(file); out_unlock_inode: mutex_unlock(&inode->i_mutex); + mnt_drop_write_file(file); out: return err; } diff --git a/fs/file_table.c b/fs/file_table.c index b3fc4d67a26b..701985e4ccda 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -43,7 +43,7 @@ static struct kmem_cache *filp_cachep __read_mostly; static struct percpu_counter nr_files __cacheline_aligned_in_smp; -static inline void file_free_rcu(struct rcu_head *head) +static void file_free_rcu(struct rcu_head *head) { struct file *f = container_of(head, struct file, f_u.fu_rcuhead); @@ -217,7 +217,7 @@ static void drop_file_write_access(struct file *file) return; if (file_check_writeable(file) != 0) return; - mnt_drop_write(mnt); + __mnt_drop_write(mnt); file_release_write(file); } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 8964cf3999b2..324bc0850534 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -383,6 +383,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, struct fuse_entry_out outentry; struct fuse_file *ff; + /* Userspace expects S_IFREG in create mode */ + BUG_ON((mode & S_IFMT) != S_IFREG); + forget = fuse_alloc_forget(); err = -ENOMEM; if (!forget) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index b321a688cde7..aba15f1b7ad2 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -703,13 +703,16 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); - if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) { + /* + * In auto invalidate mode, 
always update attributes on read. + * Otherwise, only update if we attempt to read past EOF (to ensure + * i_size is up to date). + */ + if (fc->auto_inval_data || + (pos + iov_length(iov, nr_segs) > i_size_read(inode))) { int err; - /* - * If trying to read past EOF, make sure the i_size - * attribute is up-to-date. - */ err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL); if (err) return err; @@ -944,9 +947,8 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, return err; count = ocount; - + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; @@ -1004,6 +1006,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, out: current->backing_dev_info = NULL; mutex_unlock(&inode->i_mutex); + sb_end_write(inode->i_sb); return written ? written : err; } @@ -1700,7 +1703,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) size_t n; u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; - for (n = 0; n < count; n++) { + for (n = 0; n < count; n++, iov++) { if (iov->iov_len > (size_t) max) return -ENOMEM; max -= iov->iov_len; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 771fb6322c07..e24dd74e3068 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -484,6 +484,9 @@ struct fuse_conn { /** Is fallocate not implemented by fs? */ unsigned no_fallocate:1; + /** Use enhanced/automatic page cache invalidation. */ + unsigned auto_inval_data:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 1cd61652018c..ce0a2838ccd0 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -197,6 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); loff_t oldsize; + struct timespec old_mtime; spin_lock(&fc->lock); if (attr_version != 0 && fi->attr_version > attr_version) { @@ -204,15 +205,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, return; } + old_mtime = inode->i_mtime; fuse_change_attributes_common(inode, attr, attr_valid); oldsize = inode->i_size; i_size_write(inode, attr->size); spin_unlock(&fc->lock); - if (S_ISREG(inode->i_mode) && oldsize != attr->size) { - truncate_pagecache(inode, oldsize, attr->size); - invalidate_inode_pages2(inode->i_mapping); + if (S_ISREG(inode->i_mode)) { + bool inval = false; + + if (oldsize != attr->size) { + truncate_pagecache(inode, oldsize, attr->size); + inval = true; + } else if (fc->auto_inval_data) { + struct timespec new_mtime = { + .tv_sec = attr->mtime, + .tv_nsec = attr->mtimensec, + }; + + /* + * Auto inval mode also checks and invalidates if mtime + * has changed. 
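As a rough userspace analogue of the reworked fuse_change_attributes() decision (struct cached_attr and must_invalidate() are invented names for this sketch): cached pages are dropped when the size changed, or, when FUSE_AUTO_INVAL_DATA was negotiated, when the reported mtime differs from the cached one.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <time.h>

/* Hypothetical cached attributes for one file. */
struct cached_attr {
	off_t size;
	struct timespec mtime;
};

static bool timespec_equal(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

/*
 * Decide whether cached data must be dropped after fresh attributes arrive:
 * a size change always invalidates; an mtime change only matters when the
 * auto-invalidate mode was negotiated at FUSE_INIT time.
 */
static bool must_invalidate(const struct cached_attr *before,
			    const struct cached_attr *after,
			    bool auto_inval_data)
{
	if (before->size != after->size)
		return true;
	if (auto_inval_data && !timespec_equal(&before->mtime, &after->mtime))
		return true;
	return false;
}

int main(void)
{
	struct cached_attr before = { .size = 4096, .mtime = { 100, 0 } };
	struct cached_attr after  = { .size = 4096, .mtime = { 200, 0 } };

	printf("auto_inval off: %d\n", must_invalidate(&before, &after, false));
	printf("auto_inval on:  %d\n", must_invalidate(&before, &after, true));
	return 0;
}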
+ */ + if (!timespec_equal(&old_mtime, &new_mtime)) + inval = true; + } + + if (inval) + invalidate_inode_pages2(inode->i_mapping); } } @@ -834,6 +855,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; + if (arg->flags & FUSE_AUTO_INVAL_DATA) + fc->auto_inval_data = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; @@ -859,7 +882,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | - FUSE_FLOCK_LOCKS; + FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | + FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 9aa6af13823c..d1d791ef38de 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -373,11 +373,10 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) loff_t size; int ret; - /* Wait if fs is frozen. This is racy so we check again later on - * and retry if the fs has been frozen after the page lock has - * been acquired - */ - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_pagefault(inode->i_sb); + + /* Update file times before taking page lock */ + file_update_time(vma->vm_file); ret = gfs2_rs_alloc(ip); if (ret) @@ -462,14 +461,9 @@ out: gfs2_holder_uninit(&gh); if (ret == 0) { set_page_dirty(page); - /* This check must be post dropping of transaction lock */ - if (inode->i_sb->s_frozen == SB_UNFROZEN) { - wait_on_page_writeback(page); - } else { - ret = -EAGAIN; - unlock_page(page); - } + wait_on_page_writeback(page); } + sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(ret); } diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 3a56c8d94de0..22255d96b27e 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb /* * If it's a fully non-blocking write attempt and we cannot * lock the buffer then redirty the page. Note that this can - * potentially cause a busy-wait loop from pdflush and kswapd + * potentially cause a busy-wait loop from flusher thread and kswapd * activity, but those code paths have their own higher-level * throttling. 
*/ diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index ad3e2fb763d7..adbd27875ef9 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -50,6 +50,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, if (revokes) tr->tr_reserved += gfs2_struct2blk(sdp, revokes, sizeof(u64)); + sb_start_intwrite(sdp->sd_vfs); gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh); error = gfs2_glock_nq(&tr->tr_t_gh); @@ -68,6 +69,7 @@ fail_gunlock: gfs2_glock_dq(&tr->tr_t_gh); fail_holder_uninit: + sb_end_intwrite(sdp->sd_vfs); gfs2_holder_uninit(&tr->tr_t_gh); kfree(tr); @@ -116,6 +118,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) gfs2_holder_uninit(&tr->tr_t_gh); kfree(tr); } + sb_end_intwrite(sdp->sd_vfs); return; } @@ -136,6 +139,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp) if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) gfs2_log_flush(sdp, NULL); + sb_end_intwrite(sdp->sd_vfs); } /** diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c index 5fd51a5833ff..b7ec224910c5 100644 --- a/fs/hfs/mdb.c +++ b/fs/hfs/mdb.c @@ -236,10 +236,10 @@ out: * hfs_mdb_commit() * * Description: - * This updates the MDB on disk (look also at hfs_write_super()). + * This updates the MDB on disk. * It does not check, if the superblock has been modified, or * if the filesystem has been mounted read-only. It is mainly - * called by hfs_write_super() and hfs_btree_extend(). + * called by hfs_sync_fs() and flush_mdb(). * Input Variable(s): * struct hfs_mdb *mdb: Pointer to the hfs MDB * int backup; diff --git a/fs/inode.c b/fs/inode.c index 3cc504320467..ac8d904b3f16 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1542,9 +1542,11 @@ void touch_atime(struct path *path) if (timespec_equal(&inode->i_atime, &now)) return; - if (mnt_want_write(mnt)) + if (!sb_start_write_trylock(inode->i_sb)) return; + if (__mnt_want_write(mnt)) + goto skip_update; /* * File systems can error out when updating inodes if they need to * allocate new space to modify an inode (such is the case for @@ -1555,7 +1557,9 @@ void touch_atime(struct path *path) * of the fs read only, e.g. subvolumes in Btrfs. */ update_time(inode, &now, S_ATIME); - mnt_drop_write(mnt); + __mnt_drop_write(mnt); +skip_update: + sb_end_write(inode->i_sb); } EXPORT_SYMBOL(touch_atime); @@ -1662,11 +1666,11 @@ int file_update_time(struct file *file) return 0; /* Finally allowed to write? Takes lock. */ - if (mnt_want_write_file(file)) + if (__mnt_want_write_file(file)) return 0; ret = update_time(inode, &now, sync_it); - mnt_drop_write_file(file); + __mnt_drop_write_file(file); return ret; } diff --git a/fs/internal.h b/fs/internal.h index a6fd56c68b11..371bcc4b1697 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -61,6 +61,10 @@ extern void __init mnt_init(void); extern struct lglock vfsmount_lock; +extern int __mnt_want_write(struct vfsmount *); +extern int __mnt_want_write_file(struct file *); +extern void __mnt_drop_write(struct vfsmount *); +extern void __mnt_drop_write_file(struct file *); /* * fs_struct.c diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 425c2f2cf170..09357508ec9a 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid) ret = 1; } else if (journal->j_committing_transaction) { /* - * If ext3_write_super() recently started a commit, then we - * have to wait for completion of that transaction + * If commit has been started, then we have to wait for + * completion of that transaction. 
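The touch_atime() change above follows a best-effort pattern: try to take freeze protection without blocking, and silently skip the optional atime update if the filesystem is frozen. A small pthread-based sketch of that pattern (touch_atime_like() and freeze_lock are illustrative stand-ins, not kernel interfaces):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the superblock's freeze protection. */
static pthread_rwlock_t freeze_lock = PTHREAD_RWLOCK_INITIALIZER;
static time_t cached_atime;

/*
 * Best-effort update in the spirit of the reworked touch_atime(): the atime
 * bump is optional, so when freeze protection cannot be taken without
 * blocking (the "filesystem" is frozen), the update is skipped instead of
 * waiting for the thaw.
 */
static void *touch_atime_like(void *unused)
{
	(void)unused;
	if (pthread_rwlock_tryrdlock(&freeze_lock) != 0)
		return NULL;		/* frozen: skip the update */
	cached_atime = time(NULL);	/* the actual update */
	pthread_rwlock_unlock(&freeze_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	touch_atime_like(NULL);
	printf("updated atime: %ld\n", (long)cached_atime);

	pthread_rwlock_wrlock(&freeze_lock);	/* "freeze" the filesystem */
	cached_atime = 0;
	pthread_create(&t, NULL, touch_atime_like, NULL);
	pthread_join(t, NULL);
	printf("atime while frozen: %ld (update skipped)\n", (long)cached_atime);
	pthread_rwlock_unlock(&freeze_lock);	/* "thaw" */
	return 0;
}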
*/ if (ptid) *ptid = journal->j_committing_transaction->t_tid; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e9a3c4c85594..e149b99a7ffb 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid) ret = 1; } else if (journal->j_committing_transaction) { /* - * If ext3_write_super() recently started a commit, then we - * have to wait for completion of that transaction + * If commit has been started, then we have to wait for + * completion of that transaction. */ if (ptid) *ptid = journal->j_committing_transaction->t_tid; @@ -1377,7 +1377,7 @@ static void jbd2_mark_journal_empty(journal_t *journal) * Update a journal's errno. Write updated superblock to disk waiting for IO * to complete. */ -static void jbd2_journal_update_sb_errno(journal_t *journal) +void jbd2_journal_update_sb_errno(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; @@ -1390,6 +1390,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal) jbd2_write_superblock(journal, WRITE_SYNC); } +EXPORT_SYMBOL(jbd2_journal_update_sb_errno); /* * Read the superblock for a given journal, performing initial diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 8392cb85bd54..05d29124c6ab 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -156,12 +156,16 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) struct nlm_rqst *call; int status; - nlm_get_host(host); call = nlm_alloc_call(host); if (call == NULL) return -ENOMEM; nlmclnt_locks_init_private(fl, host); + if (!fl->fl_u.nfs_fl.owner) { + /* lockowner allocation has failed */ + nlmclnt_release_call(call); + return -ENOMEM; + } /* Set up the argument struct */ nlmclnt_setlockargs(call, fl); @@ -185,9 +189,6 @@ EXPORT_SYMBOL_GPL(nlmclnt_proc); /* * Allocate an NLM RPC call struct - * - * Note: the caller must hold a reference to host. In case of failure, - * this reference will be released. 
*/ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) { @@ -199,7 +200,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) atomic_set(&call->a_count, 1); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); - call->a_host = host; + call->a_host = nlm_get_host(host); return call; } if (signalled()) @@ -207,7 +208,6 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } - nlmclnt_release_host(host); return NULL; } @@ -750,7 +750,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl dprintk("lockd: blocking lock attempt was interrupted by a signal.\n" " Attempting to cancel lock.\n"); - req = nlm_alloc_call(nlm_get_host(host)); + req = nlm_alloc_call(host); if (!req) return -ENOMEM; req->a_flags = RPC_TASK_ASYNC; diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 4a43d253c045..b147d1ae71fd 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -257,6 +257,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args return rpc_system_err; call = nlm_alloc_call(host); + nlmsvc_release_host(host); if (call == NULL) return rpc_system_err; diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index afe4488c33d8..fb1a2bedbe97 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -219,7 +219,6 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, struct nlm_block *block; struct nlm_rqst *call = NULL; - nlm_get_host(host); call = nlm_alloc_call(host); if (call == NULL) return NULL; diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index de8f2caa2235..3009a365e082 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -297,6 +297,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args return rpc_system_err; call = nlm_alloc_call(host); + nlmsvc_release_host(host); if (call == NULL) return rpc_system_err; diff --git a/fs/locks.c b/fs/locks.c index cdcf219a7391..7e81bfc75164 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -200,11 +200,7 @@ void locks_release_private(struct file_lock *fl) fl->fl_ops->fl_release_private(fl); fl->fl_ops = NULL; } - if (fl->fl_lmops) { - if (fl->fl_lmops->lm_release_private) - fl->fl_lmops->lm_release_private(fl); - fl->fl_lmops = NULL; - } + fl->fl_lmops = NULL; } EXPORT_SYMBOL_GPL(locks_release_private); diff --git a/fs/namei.c b/fs/namei.c index 2ccc35c4dc24..dd1ed1b8e98e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask) /** * sb_permission - Check superblock-level permissions * @sb: Superblock of inode to check permission on + * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Separate out file-system wide checks from inode-specific permission checks. @@ -650,6 +651,122 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki path_put(link); } +int sysctl_protected_symlinks __read_mostly = 1; +int sysctl_protected_hardlinks __read_mostly = 1; + +/** + * may_follow_link - Check symlink following for unsafe situations + * @link: The path of the symlink + * @nd: nameidata pathwalk data + * + * In the case of the sysctl_protected_symlinks sysctl being enabled, + * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is + * in a sticky world-writable directory. 
This is to protect privileged + * processes from failing races against path names that may change out + * from under them by way of other users creating malicious symlinks. + * It will permit symlinks to be followed only when outside a sticky + * world-writable directory, or when the uid of the symlink and follower + * match, or when the directory owner matches the symlink's owner. + * + * Returns 0 if following the symlink is allowed, -ve on error. + */ +static inline int may_follow_link(struct path *link, struct nameidata *nd) +{ + const struct inode *inode; + const struct inode *parent; + + if (!sysctl_protected_symlinks) + return 0; + + /* Allowed if owner and follower match. */ + inode = link->dentry->d_inode; + if (current_cred()->fsuid == inode->i_uid) + return 0; + + /* Allowed if parent directory not sticky and world-writable. */ + parent = nd->path.dentry->d_inode; + if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) + return 0; + + /* Allowed if parent directory and link owner match. */ + if (parent->i_uid == inode->i_uid) + return 0; + + path_put_conditional(link, nd); + path_put(&nd->path); + audit_log_link_denied("follow_link", link); + return -EACCES; +} + +/** + * safe_hardlink_source - Check for safe hardlink conditions + * @inode: the source inode to hardlink from + * + * Return false if at least one of the following conditions: + * - inode is not a regular file + * - inode is setuid + * - inode is setgid and group-exec + * - access failure for read and write + * + * Otherwise returns true. + */ +static bool safe_hardlink_source(struct inode *inode) +{ + umode_t mode = inode->i_mode; + + /* Special files should not get pinned to the filesystem. */ + if (!S_ISREG(mode)) + return false; + + /* Setuid files should not get pinned to the filesystem. */ + if (mode & S_ISUID) + return false; + + /* Executable setgid files should not get pinned to the filesystem. */ + if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) + return false; + + /* Hardlinking to unreadable or unwritable sources is dangerous. */ + if (inode_permission(inode, MAY_READ | MAY_WRITE)) + return false; + + return true; +} + +/** + * may_linkat - Check permissions for creating a hardlink + * @link: the source to hardlink from + * + * Block hardlink when all of: + * - sysctl_protected_hardlinks enabled + * - fsuid does not match inode + * - hardlink source is unsafe (see safe_hardlink_source() above) + * - not CAP_FOWNER + * + * Returns 0 if successful, -ve on error. + */ +static int may_linkat(struct path *link) +{ + const struct cred *cred; + struct inode *inode; + + if (!sysctl_protected_hardlinks) + return 0; + + cred = current_cred(); + inode = link->dentry->d_inode; + + /* Source inode owner (or CAP_FOWNER) can hardlink all they like, + * otherwise, it must be a safe source. 
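A userspace rendering of these two predicates, applied to struct stat data, may make the conditions easier to see. The function bodies below are only a sketch of the rules documented above; safe_hardlink_source() here omits the kernel's additional read/write permission check.

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * A hardlink source is "safe" when it is a regular file, is neither setuid
 * nor group-executable setgid (the kernel also requires that the caller
 * could open it read-write, which is omitted in this sketch).
 */
static bool safe_hardlink_source(const struct stat *st)
{
	if (!S_ISREG(st->st_mode))
		return false;
	if (st->st_mode & S_ISUID)
		return false;
	if ((st->st_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
		return false;
	return true;
}

/*
 * Symlink following is restricted only inside sticky, world-writable
 * directories (such as /tmp), and only when neither the follower nor the
 * directory owner owns the link.
 */
static bool may_follow_link(const struct stat *dir, const struct stat *link,
			    uid_t fsuid)
{
	if (fsuid == link->st_uid)
		return true;
	if ((dir->st_mode & (S_ISVTX | S_IWOTH)) != (S_ISVTX | S_IWOTH))
		return true;
	return dir->st_uid == link->st_uid;
}

int main(void)
{
	struct stat st;

	if (lstat("/etc/passwd", &st) == 0)
		printf("safe hardlink source: %d\n", safe_hardlink_source(&st));
	return 0;
}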
+ */ + if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) || + capable(CAP_FOWNER)) + return 0; + + audit_log_link_denied("linkat", link); + return -EPERM; +} + static __always_inline int follow_link(struct path *link, struct nameidata *nd, void **p) { @@ -1818,6 +1935,9 @@ static int path_lookupat(int dfd, const char *name, while (err > 0) { void *cookie; struct path link = path; + err = may_follow_link(&link, nd); + if (unlikely(err)) + break; nd->flags |= LOOKUP_PARENT; err = follow_link(&link, nd, &cookie); if (err) @@ -2277,7 +2397,7 @@ static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) static int atomic_open(struct nameidata *nd, struct dentry *dentry, struct path *path, struct file *file, const struct open_flags *op, - bool *want_write, bool need_lookup, + bool got_write, bool need_lookup, int *opened) { struct inode *dir = nd->path.dentry->d_inode; @@ -2296,11 +2416,11 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, goto out; } - mode = op->mode & S_IALLUGO; + mode = op->mode; if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) mode &= ~current_umask(); - if (open_flag & O_EXCL) { + if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) { open_flag &= ~O_TRUNC; *opened |= FILE_CREATED; } @@ -2314,12 +2434,9 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, * Another problem is returing the "right" error value (e.g. for an * O_EXCL open we want to return EEXIST not EROFS). */ - if ((open_flag & (O_CREAT | O_TRUNC)) || - (open_flag & O_ACCMODE) != O_RDONLY) { - error = mnt_want_write(nd->path.mnt); - if (!error) { - *want_write = true; - } else if (!(open_flag & O_CREAT)) { + if (((open_flag & (O_CREAT | O_TRUNC)) || + (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) { + if (!(open_flag & O_CREAT)) { /* * No O_CREATE -> atomicity not a requirement -> fall * back to lookup + open @@ -2327,17 +2444,17 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, goto no_open; } else if (open_flag & (O_EXCL | O_TRUNC)) { /* Fall back and fail with the right error */ - create_error = error; + create_error = -EROFS; goto no_open; } else { /* No side effects, safe to clear O_CREAT */ - create_error = error; + create_error = -EROFS; open_flag &= ~O_CREAT; } } if (open_flag & O_CREAT) { - error = may_o_create(&nd->path, dentry, op->mode); + error = may_o_create(&nd->path, dentry, mode); if (error) { create_error = error; if (open_flag & O_EXCL) @@ -2374,6 +2491,10 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, dput(dentry); dentry = file->f_path.dentry; } + if (create_error && dentry->d_inode == NULL) { + error = create_error; + goto out; + } goto looked_up; } @@ -2438,7 +2559,7 @@ looked_up: static int lookup_open(struct nameidata *nd, struct path *path, struct file *file, const struct open_flags *op, - bool *want_write, int *opened) + bool got_write, int *opened) { struct dentry *dir = nd->path.dentry; struct inode *dir_inode = dir->d_inode; @@ -2456,7 +2577,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, goto out_no_open; if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) { - return atomic_open(nd, dentry, path, file, op, want_write, + return atomic_open(nd, dentry, path, file, op, got_write, need_lookup, opened); } @@ -2480,10 +2601,10 @@ static int lookup_open(struct nameidata *nd, struct path *path, * a permanent write count is taken through * the 'struct file' in finish_open(). 
*/ - error = mnt_want_write(nd->path.mnt); - if (error) + if (!got_write) { + error = -EROFS; goto out_dput; - *want_write = true; + } *opened |= FILE_CREATED; error = security_path_mknod(&nd->path, dentry, mode, 0); if (error) @@ -2513,7 +2634,7 @@ static int do_last(struct nameidata *nd, struct path *path, struct dentry *dir = nd->path.dentry; int open_flag = op->open_flag; bool will_truncate = (open_flag & O_TRUNC) != 0; - bool want_write = false; + bool got_write = false; int acc_mode = op->acc_mode; struct inode *inode; bool symlink_ok = false; @@ -2582,8 +2703,18 @@ static int do_last(struct nameidata *nd, struct path *path, } retry_lookup: + if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { + error = mnt_want_write(nd->path.mnt); + if (!error) + got_write = true; + /* + * do _not_ fail yet - we might not need that or fail with + * a different error; let lookup_open() decide; we'll be + * dropping this one anyway. + */ + } mutex_lock(&dir->d_inode->i_mutex); - error = lookup_open(nd, path, file, op, &want_write, opened); + error = lookup_open(nd, path, file, op, got_write, opened); mutex_unlock(&dir->d_inode->i_mutex); if (error <= 0) { @@ -2608,22 +2739,23 @@ retry_lookup: } /* - * It already exists. + * create/update audit record if it already exists. */ - audit_inode(pathname, path->dentry); + if (path->dentry->d_inode) + audit_inode(pathname, path->dentry); /* * If atomic_open() acquired write access it is dropped now due to * possible mount and symlink following (this might be optimized away if * necessary...) */ - if (want_write) { + if (got_write) { mnt_drop_write(nd->path.mnt); - want_write = false; + got_write = false; } error = -EEXIST; - if (open_flag & O_EXCL) + if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) goto exit_dput; error = follow_managed(path, nd->flags); @@ -2684,7 +2816,7 @@ finish_open: error = mnt_want_write(nd->path.mnt); if (error) goto out; - want_write = true; + got_write = true; } finish_open_created: error = may_open(&nd->path, acc_mode, open_flag); @@ -2711,7 +2843,7 @@ opened: goto exit_fput; } out: - if (want_write) + if (got_write) mnt_drop_write(nd->path.mnt); path_put(&save_parent); terminate_walk(nd); @@ -2735,9 +2867,9 @@ stale_open: nd->inode = dir->d_inode; save_parent.mnt = NULL; save_parent.dentry = NULL; - if (want_write) { + if (got_write) { mnt_drop_write(nd->path.mnt); - want_write = false; + got_write = false; } retried = true; goto retry_lookup; @@ -2777,6 +2909,9 @@ static struct file *path_openat(int dfd, const char *pathname, error = -ELOOP; break; } + error = may_follow_link(&link, nd); + if (unlikely(error)) + break; nd->flags |= LOOKUP_PARENT; nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); error = follow_link(&link, nd, &cookie); @@ -2846,6 +2981,7 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path { struct dentry *dentry = ERR_PTR(-EEXIST); struct nameidata nd; + int err2; int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); if (error) return ERR_PTR(error); @@ -2859,16 +2995,19 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path nd.flags &= ~LOOKUP_PARENT; nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; + /* don't fail immediately if it's r/o, at least try to report other errors */ + err2 = mnt_want_write(nd.path.mnt); /* * Do the final lookup. 
*/ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); if (IS_ERR(dentry)) - goto fail; + goto unlock; + error = -EEXIST; if (dentry->d_inode) - goto eexist; + goto fail; /* * Special case - lookup gave negative, but... we had foo/bar/ * From the vfs_mknod() POV we just have a negative dentry - @@ -2876,23 +3015,37 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path * been asking for (non-existent) directory. -ENOENT for you. */ if (unlikely(!is_dir && nd.last.name[nd.last.len])) { - dput(dentry); - dentry = ERR_PTR(-ENOENT); + error = -ENOENT; + goto fail; + } + if (unlikely(err2)) { + error = err2; goto fail; } *path = nd.path; return dentry; -eexist: - dput(dentry); - dentry = ERR_PTR(-EEXIST); fail: + dput(dentry); + dentry = ERR_PTR(error); +unlock: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + if (!err2) + mnt_drop_write(nd.path.mnt); out: path_put(&nd.path); return dentry; } EXPORT_SYMBOL(kern_path_create); +void done_path_create(struct path *path, struct dentry *dentry) +{ + dput(dentry); + mutex_unlock(&path->dentry->d_inode->i_mutex); + mnt_drop_write(path->mnt); + path_put(path); +} +EXPORT_SYMBOL(done_path_create); + struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) { char *tmp = getname(pathname); @@ -2956,8 +3109,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, struct path path; int error; - if (S_ISDIR(mode)) - return -EPERM; + error = may_mknod(mode); + if (error) + return error; dentry = user_path_create(dfd, filename, &path, 0); if (IS_ERR(dentry)) @@ -2965,15 +3119,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); - error = may_mknod(mode); - if (error) - goto out_dput; - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; error = security_path_mknod(&path, dentry, mode, dev); if (error) - goto out_drop_write; + goto out; switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(path.dentry->d_inode,dentry,mode,true); @@ -2986,13 +3134,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); break; } -out_drop_write: - mnt_drop_write(path.mnt); -out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); - +out: + done_path_create(&path, dentry); return error; } @@ -3038,19 +3181,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; error = security_path_mkdir(&path, dentry, mode); - if (error) - goto out_drop_write; - error = vfs_mkdir(path.dentry->d_inode, dentry, mode); -out_drop_write: - mnt_drop_write(path.mnt); -out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + if (!error) + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); + done_path_create(&path, dentry); return error; } @@ -3144,6 +3278,9 @@ static long do_rmdir(int dfd, const char __user *pathname) } nd.flags &= ~LOOKUP_PARENT; + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit1; mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); @@ -3154,19 +3291,15 @@ static long do_rmdir(int dfd, const char __user *pathname) error = -ENOENT; goto exit3; 
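The kern_path_create()/done_path_create() pairing and the reordered do_rmdir() below both follow the same discipline: write and freeze protection is taken before the directory i_mutex and released after it. A schematic pthread sketch of that ordering (create_like_operation() is an invented name; the two locks only stand in for sb_writers and i_mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t sb_writers = PTHREAD_RWLOCK_INITIALIZER; /* outer: freeze protection */
static pthread_mutex_t dir_mutex = PTHREAD_MUTEX_INITIALIZER;    /* inner: directory lock */

static int create_like_operation(void)
{
	/*
	 * Freeze/write protection first (may block while frozen), then the
	 * directory lock; released strictly in reverse order, mirroring the
	 * new kern_path_create()/done_path_create() pairing.
	 */
	pthread_rwlock_rdlock(&sb_writers);
	pthread_mutex_lock(&dir_mutex);

	/* ... lookup and create the object here ... */

	pthread_mutex_unlock(&dir_mutex);
	pthread_rwlock_unlock(&sb_writers);
	return 0;
}

int main(void)
{
	printf("create: %d\n", create_like_operation());
	return 0;
}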
} - error = mnt_want_write(nd.path.mnt); - if (error) - goto exit3; error = security_path_rmdir(&nd.path, dentry); if (error) - goto exit4; + goto exit3; error = vfs_rmdir(nd.path.dentry->d_inode, dentry); -exit4: - mnt_drop_write(nd.path.mnt); exit3: dput(dentry); exit2: mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + mnt_drop_write(nd.path.mnt); exit1: path_put(&nd.path); putname(name); @@ -3233,6 +3366,9 @@ static long do_unlinkat(int dfd, const char __user *pathname) goto exit1; nd.flags &= ~LOOKUP_PARENT; + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit1; mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); dentry = lookup_hash(&nd); @@ -3245,21 +3381,17 @@ static long do_unlinkat(int dfd, const char __user *pathname) if (!inode) goto slashes; ihold(inode); - error = mnt_want_write(nd.path.mnt); - if (error) - goto exit2; error = security_path_unlink(&nd.path, dentry); if (error) - goto exit3; + goto exit2; error = vfs_unlink(nd.path.dentry->d_inode, dentry); -exit3: - mnt_drop_write(nd.path.mnt); - exit2: +exit2: dput(dentry); } mutex_unlock(&nd.path.dentry->d_inode->i_mutex); if (inode) iput(inode); /* truncate the inode here */ + mnt_drop_write(nd.path.mnt); exit1: path_put(&nd.path); putname(name); @@ -3324,19 +3456,10 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, if (IS_ERR(dentry)) goto out_putname; - error = mnt_want_write(path.mnt); - if (error) - goto out_dput; error = security_path_symlink(&path, dentry, from); - if (error) - goto out_drop_write; - error = vfs_symlink(path.dentry->d_inode, dentry, from); -out_drop_write: - mnt_drop_write(path.mnt); -out_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + if (!error) + error = vfs_symlink(path.dentry->d_inode, dentry, from); + done_path_create(&path, dentry); out_putname: putname(from); return error; @@ -3436,19 +3559,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; - error = mnt_want_write(new_path.mnt); - if (error) + error = may_linkat(&old_path); + if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) - goto out_drop_write; + goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); -out_drop_write: - mnt_drop_write(new_path.mnt); out_dput: - dput(new_dentry); - mutex_unlock(&new_path.dentry->d_inode->i_mutex); - path_put(&new_path); + done_path_create(&new_path, new_dentry); out: path_put(&old_path); @@ -3644,6 +3763,10 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, if (newnd.last_type != LAST_NORM) goto exit2; + error = mnt_want_write(oldnd.path.mnt); + if (error) + goto exit2; + oldnd.flags &= ~LOOKUP_PARENT; newnd.flags &= ~LOOKUP_PARENT; newnd.flags |= LOOKUP_RENAME_TARGET; @@ -3679,23 +3802,19 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, if (new_dentry == trap) goto exit5; - error = mnt_want_write(oldnd.path.mnt); - if (error) - goto exit5; error = security_path_rename(&oldnd.path, old_dentry, &newnd.path, new_dentry); if (error) - goto exit6; + goto exit5; error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); -exit6: - mnt_drop_write(oldnd.path.mnt); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_dir, old_dir); + mnt_drop_write(oldnd.path.mnt); exit2: path_put(&newnd.path); putname(to); diff --git a/fs/namespace.c b/fs/namespace.c index 
c53d3381b0d0..4d31f73e2561 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -283,24 +283,22 @@ static int mnt_is_readonly(struct vfsmount *mnt) } /* - * Most r/o checks on a fs are for operations that take - * discrete amounts of time, like a write() or unlink(). - * We must keep track of when those operations start - * (for permission checks) and when they end, so that - * we can determine when writes are able to occur to - * a filesystem. + * Most r/o & frozen checks on a fs are for operations that take discrete + * amounts of time, like a write() or unlink(). We must keep track of when + * those operations start (for permission checks) and when they end, so that we + * can determine when writes are able to occur to a filesystem. */ /** - * mnt_want_write - get write access to a mount + * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * - * This tells the low-level filesystem that a write is - * about to be performed to it, and makes sure that - * writes are allowed before returning success. When - * the write operation is finished, mnt_drop_write() - * must be called. This is effectively a refcount. + * This tells the low-level filesystem that a write is about to be performed to + * it, and makes sure that writes are allowed (mnt it read-write) before + * returning success. This operation does not protect against filesystem being + * frozen. When the write operation is finished, __mnt_drop_write() must be + * called. This is effectively a refcount. */ -int mnt_want_write(struct vfsmount *m) +int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; @@ -326,6 +324,27 @@ int mnt_want_write(struct vfsmount *m) ret = -EROFS; } preempt_enable(); + + return ret; +} + +/** + * mnt_want_write - get write access to a mount + * @m: the mount on which to take a write + * + * This tells the low-level filesystem that a write is about to be performed to + * it, and makes sure that writes are allowed (mount is read-write, filesystem + * is not frozen) before returning success. When the write operation is + * finished, mnt_drop_write() must be called. This is effectively a refcount. 
c53d3381b0d0..4d31f73e2561 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -283,24 +283,22 @@ static int mnt_is_readonly(struct vfsmount *mnt) } /* - * Most r/o checks on a fs are for operations that take - * discrete amounts of time, like a write() or unlink(). - * We must keep track of when those operations start - * (for permission checks) and when they end, so that - * we can determine when writes are able to occur to - * a filesystem. + * Most r/o & frozen checks on a fs are for operations that take discrete + * amounts of time, like a write() or unlink(). We must keep track of when + * those operations start (for permission checks) and when they end, so that we + * can determine when writes are able to occur to a filesystem. */ /** - * mnt_want_write - get write access to a mount + * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * - * This tells the low-level filesystem that a write is - * about to be performed to it, and makes sure that - * writes are allowed before returning success. When - * the write operation is finished, mnt_drop_write() - * must be called. This is effectively a refcount. + * This tells the low-level filesystem that a write is about to be performed to + * it, and makes sure that writes are allowed (mnt is read-write) before + * returning success. This operation does not protect against the filesystem being + * frozen. When the write operation is finished, __mnt_drop_write() must be + * called. This is effectively a refcount. */ -int mnt_want_write(struct vfsmount *m) +int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; @@ -326,6 +324,27 @@ int mnt_want_write(struct vfsmount *m) ret = -EROFS; } preempt_enable(); + + return ret; +} + +/** + * mnt_want_write - get write access to a mount + * @m: the mount on which to take a write + * + * This tells the low-level filesystem that a write is about to be performed to + * it, and makes sure that writes are allowed (mount is read-write, filesystem + * is not frozen) before returning success. When the write operation is + * finished, mnt_drop_write() must be called. This is effectively a refcount.
+ */ +int mnt_want_write(struct vfsmount *m) +{ + int ret; + + sb_start_write(m->mnt_sb); + ret = __mnt_want_write(m); + if (ret) + sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); @@ -355,38 +374,76 @@ int mnt_clone_write(struct vfsmount *mnt) EXPORT_SYMBOL_GPL(mnt_clone_write); /** - * mnt_want_write_file - get write access to a file's mount + * __mnt_want_write_file - get write access to a file's mount * @file: the file who's mount on which to take a write * - * This is like mnt_want_write, but it takes a file and can + * This is like __mnt_want_write, but it takes a file and can * do some optimisations if the file is open for write already */ -int mnt_want_write_file(struct file *file) +int __mnt_want_write_file(struct file *file) { struct inode *inode = file->f_dentry->d_inode; + if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) - return mnt_want_write(file->f_path.mnt); + return __mnt_want_write(file->f_path.mnt); else return mnt_clone_write(file->f_path.mnt); } + +/** + * mnt_want_write_file - get write access to a file's mount + * @file: the file who's mount on which to take a write + * + * This is like mnt_want_write, but it takes a file and can + * do some optimisations if the file is open for write already + */ +int mnt_want_write_file(struct file *file) +{ + int ret; + + sb_start_write(file->f_path.mnt->mnt_sb); + ret = __mnt_want_write_file(file); + if (ret) + sb_end_write(file->f_path.mnt->mnt_sb); + return ret; +} EXPORT_SYMBOL_GPL(mnt_want_write_file); /** - * mnt_drop_write - give up write access to a mount + * __mnt_drop_write - give up write access to a mount * @mnt: the mount on which to give up write access * * Tells the low-level filesystem that we are done * performing writes to it. Must be matched with - * mnt_want_write() call above. + * __mnt_want_write() call above. */ -void mnt_drop_write(struct vfsmount *mnt) +void __mnt_drop_write(struct vfsmount *mnt) { preempt_disable(); mnt_dec_writers(real_mount(mnt)); preempt_enable(); } + +/** + * mnt_drop_write - give up write access to a mount + * @mnt: the mount on which to give up write access + * + * Tells the low-level filesystem that we are done performing writes to it and + * also allows filesystem to be frozen again. Must be matched with + * mnt_want_write() call above. + */ +void mnt_drop_write(struct vfsmount *mnt) +{ + __mnt_drop_write(mnt); + sb_end_write(mnt->mnt_sb); +} EXPORT_SYMBOL_GPL(mnt_drop_write); +void __mnt_drop_write_file(struct file *file) +{ + __mnt_drop_write(file->f_path.mnt); +} + void mnt_drop_write_file(struct file *file) { mnt_drop_write(file->f_path.mnt); diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 5ff0b7b9fc08..43295d45cc2b 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -154,6 +154,10 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) if (status < 0) return; + status = mnt_want_write_file(rec_file); + if (status) + return; + dir = rec_file->f_path.dentry; /* lock the parent */ mutex_lock(&dir->d_inode->i_mutex); @@ -173,11 +177,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) * as well be forgiving and just succeed silently. 
*/ goto out_put; - status = mnt_want_write_file(rec_file); - if (status) - goto out_put; status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU); - mnt_drop_write_file(rec_file); out_put: dput(dentry); out_unlock: @@ -189,6 +189,7 @@ out_unlock: " (err %d); please check that %s exists" " and is writeable", status, user_recovery_dirname); + mnt_drop_write_file(rec_file); nfs4_reset_creds(original_cred); } diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index cc793005a87c..032af381b3aa 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -635,6 +635,7 @@ fh_put(struct svc_fh *fhp) fhp->fh_post_saved = 0; #endif } + fh_drop_write(fhp); if (exp) { exp_put(exp); fhp->fh_export = NULL; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index e15dc45fc5ec..aad6d457b9e8 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -196,6 +196,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, struct dentry *dchild; int type, mode; __be32 nfserr; + int hosterr; dev_t rdev = 0, wanted = new_decode_dev(attr->ia_size); dprintk("nfsd: CREATE %s %.*s\n", @@ -214,6 +215,12 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, nfserr = nfserr_exist; if (isdotent(argp->name, argp->len)) goto done; + hosterr = fh_want_write(dirfhp); + if (hosterr) { + nfserr = nfserrno(hosterr); + goto done; + } + fh_lock_nested(dirfhp, I_MUTEX_PARENT); dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len); if (IS_ERR(dchild)) { @@ -330,7 +337,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp, out_unlock: /* We don't really need to unlock, as fh_put does it. */ fh_unlock(dirfhp); - + fh_drop_write(dirfhp); done: fh_put(dirfhp); return nfsd_return_dirop(nfserr, resp); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 702f64e820c3..a9269f142cc4 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1284,6 +1284,10 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, * If it has, the parent directory should already be locked. */ if (!resfhp->fh_dentry) { + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + /* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */ fh_lock_nested(fhp, I_MUTEX_PARENT); dchild = lookup_one_len(fname, dentry, flen); @@ -1327,14 +1331,11 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; } - host_err = fh_want_write(fhp); - if (host_err) - goto out_nfserr; - /* * Get the dir op function pointer. */ err = 0; + host_err = 0; switch (type) { case S_IFREG: host_err = vfs_create(dirp, dchild, iap->ia_mode, true); @@ -1351,10 +1352,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev); break; } - if (host_err < 0) { - fh_drop_write(fhp); + if (host_err < 0) goto out_nfserr; - } err = nfsd_create_setattr(rqstp, resfhp, iap); @@ -1366,7 +1365,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, err2 = nfserrno(commit_metadata(fhp)); if (err2) err = err2; - fh_drop_write(fhp); /* * Update the file handle to get the new inode info. 
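fh_put() can now call fh_drop_write() unconditionally because the new fh_want_write flag (see the fs/nfsd/vfs.h hunk further down) makes the drop idempotent. A tiny sketch of that flag-guarded release pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct svc_fh's new fh_want_write flag. */
struct handle {
	bool want_write;
};

static int want_write(struct handle *h)
{
	/* pretend the underlying mnt_want_write() succeeded */
	h->want_write = true;
	return 0;
}

/*
 * Because the flag records whether write access is actually held, the drop
 * helper may be called from both the error paths and the common fh_put()
 * path without unbalancing the reference count.
 */
static void drop_write(struct handle *h)
{
	if (h->want_write) {
		h->want_write = false;
		/* the underlying mnt_drop_write() would go here */
	}
}

int main(void)
{
	struct handle h = { false };

	want_write(&h);
	drop_write(&h);
	drop_write(&h);		/* harmless: already dropped */
	printf("ok\n");
	return 0;
}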
*/ @@ -1425,6 +1423,11 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, err = nfserr_notdir; if (!dirp->i_op->lookup) goto out; + + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + fh_lock_nested(fhp, I_MUTEX_PARENT); /* @@ -1457,9 +1460,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, v_atime = verifier[1]&0x7fffffff; } - host_err = fh_want_write(fhp); - if (host_err) - goto out_nfserr; if (dchild->d_inode) { err = 0; @@ -1530,7 +1530,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, if (!err) err = nfserrno(commit_metadata(fhp)); - fh_drop_write(fhp); /* * Update the filehandle to get the new inode info. */ @@ -1541,6 +1540,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, fh_unlock(fhp); if (dchild && !IS_ERR(dchild)) dput(dchild); + fh_drop_write(fhp); return err; out_nfserr: @@ -1621,6 +1621,11 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); if (err) goto out; + + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + fh_lock(fhp); dentry = fhp->fh_dentry; dnew = lookup_one_len(fname, dentry, flen); @@ -1628,10 +1633,6 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp, if (IS_ERR(dnew)) goto out_nfserr; - host_err = fh_want_write(fhp); - if (host_err) - goto out_nfserr; - if (unlikely(path[plen] != 0)) { char *path_alloced = kmalloc(plen+1, GFP_KERNEL); if (path_alloced == NULL) @@ -1691,6 +1692,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, if (isdotent(name, len)) goto out; + host_err = fh_want_write(tfhp); + if (host_err) { + err = nfserrno(host_err); + goto out; + } + fh_lock_nested(ffhp, I_MUTEX_PARENT); ddir = ffhp->fh_dentry; dirp = ddir->d_inode; @@ -1702,18 +1709,13 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, dold = tfhp->fh_dentry; - host_err = fh_want_write(tfhp); - if (host_err) { - err = nfserrno(host_err); - goto out_dput; - } err = nfserr_noent; if (!dold->d_inode) - goto out_drop_write; + goto out_dput; host_err = nfsd_break_lease(dold->d_inode); if (host_err) { err = nfserrno(host_err); - goto out_drop_write; + goto out_dput; } host_err = vfs_link(dold, dirp, dnew); if (!host_err) { @@ -1726,12 +1728,11 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, else err = nfserrno(host_err); } -out_drop_write: - fh_drop_write(tfhp); out_dput: dput(dnew); out_unlock: fh_unlock(ffhp); + fh_drop_write(tfhp); out: return err; @@ -1774,6 +1775,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) goto out; + host_err = fh_want_write(ffhp); + if (host_err) { + err = nfserrno(host_err); + goto out; + } + /* cannot use fh_lock as we need deadlock protective ordering * so do it by hand */ trap = lock_rename(tdentry, fdentry); @@ -1804,17 +1811,14 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, host_err = -EXDEV; if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) goto out_dput_new; - host_err = fh_want_write(ffhp); - if (host_err) - goto out_dput_new; host_err = nfsd_break_lease(odentry->d_inode); if (host_err) - goto out_drop_write; + goto out_dput_new; if (ndentry->d_inode) { host_err = nfsd_break_lease(ndentry->d_inode); if (host_err) - goto out_drop_write; + goto out_dput_new; } host_err = vfs_rename(fdir, odentry, tdir, ndentry); if (!host_err) { @@ -1822,8 +1826,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, 
int flen, if (!host_err) host_err = commit_metadata(ffhp); } -out_drop_write: - fh_drop_write(ffhp); out_dput_new: dput(ndentry); out_dput_old: @@ -1839,6 +1841,7 @@ out_drop_write: fill_post_wcc(tfhp); unlock_rename(tdentry, fdentry); ffhp->fh_locked = tfhp->fh_locked = 0; + fh_drop_write(ffhp); out: return err; @@ -1864,6 +1867,10 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (err) goto out; + host_err = fh_want_write(fhp); + if (host_err) + goto out_nfserr; + fh_lock_nested(fhp, I_MUTEX_PARENT); dentry = fhp->fh_dentry; dirp = dentry->d_inode; @@ -1882,21 +1889,15 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (!type) type = rdentry->d_inode->i_mode & S_IFMT; - host_err = fh_want_write(fhp); - if (host_err) - goto out_put; - host_err = nfsd_break_lease(rdentry->d_inode); if (host_err) - goto out_drop_write; + goto out_put; if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); else host_err = vfs_rmdir(dirp, rdentry); if (!host_err) host_err = commit_metadata(fhp); -out_drop_write: - fh_drop_write(fhp); out_put: dput(rdentry); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index ec0611b2b738..359594c393d2 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -110,12 +110,19 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); static inline int fh_want_write(struct svc_fh *fh) { - return mnt_want_write(fh->fh_export->ex_path.mnt); + int ret = mnt_want_write(fh->fh_export->ex_path.mnt); + + if (!ret) + fh->fh_want_write = 1; + return ret; } static inline void fh_drop_write(struct svc_fh *fh) { - mnt_drop_write(fh->fh_export->ex_path.mnt); + if (fh->fh_want_write) { + fh->fh_want_write = 0; + mnt_drop_write(fh->fh_export->ex_path.mnt); + } } #endif /* LINUX_NFSD_VFS_H */ diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 62cebc8e1a1f..a4d56ac02e6c 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -69,16 +69,18 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_dentry->d_inode; struct nilfs_transaction_info ti; - int ret; + int ret = 0; if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info))) return VM_FAULT_SIGBUS; /* -ENOSPC */ + sb_start_pagefault(inode->i_sb); lock_page(page); if (page->mapping != inode->i_mapping || page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) { unlock_page(page); - return VM_FAULT_NOPAGE; /* make the VM retry the fault */ + ret = -EFAULT; /* make the VM retry the fault */ + goto out; } /* @@ -112,19 +114,21 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ret = nilfs_transaction_begin(inode->i_sb, &ti, 1); /* never returns -ENOMEM, but may return -ENOSPC */ if (unlikely(ret)) - return VM_FAULT_SIGBUS; + goto out; - ret = block_page_mkwrite(vma, vmf, nilfs_get_block); - if (ret != VM_FAULT_LOCKED) { + ret = __block_page_mkwrite(vma, vmf, nilfs_get_block); + if (ret) { nilfs_transaction_abort(inode->i_sb); - return ret; + goto out; } nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); nilfs_transaction_commit(inode->i_sb); mapped: wait_on_page_writeback(page); - return VM_FAULT_LOCKED; + out: + sb_end_pagefault(inode->i_sb); + return block_page_mkwrite_return(ret); } static const struct vm_operations_struct nilfs_file_vm_ops = { diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 0b6387c67e6c..fdb180769485 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -660,8 +660,6 @@ static int nilfs_ioctl_clean_segments(struct 
inode *inode, struct file *filp, goto out_free; } - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); if (ret < 0) printk(KERN_ERR "NILFS: GC failed during preparation: " diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 88e11fb346b6..a5752a589932 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -189,7 +189,7 @@ int nilfs_transaction_begin(struct super_block *sb, if (ret > 0) return 0; - vfs_check_frozen(sb, SB_FREEZE_WRITE); + sb_start_intwrite(sb); nilfs = sb->s_fs_info; down_read(&nilfs->ns_segctor_sem); @@ -205,6 +205,7 @@ int nilfs_transaction_begin(struct super_block *sb, current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); + sb_end_intwrite(sb); return ret; } @@ -246,6 +247,7 @@ int nilfs_transaction_commit(struct super_block *sb) err = nilfs_construct_segment(sb); if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); + sb_end_intwrite(sb); return err; } @@ -264,6 +266,7 @@ void nilfs_transaction_abort(struct super_block *sb) current->journal_info = ti->ti_save; if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC) kmem_cache_free(nilfs_transaction_cachep, ti); + sb_end_intwrite(sb); } void nilfs_relax_pressure_in_lock(struct super_block *sb) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 6522cac6057c..6a10812711c1 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -676,17 +676,13 @@ static const struct super_operations nilfs_sops = { .alloc_inode = nilfs_alloc_inode, .destroy_inode = nilfs_destroy_inode, .dirty_inode = nilfs_dirty_inode, - /* .write_inode = nilfs_write_inode, */ - /* .drop_inode = nilfs_drop_inode, */ .evict_inode = nilfs_evict_inode, .put_super = nilfs_put_super, - /* .write_super = nilfs_write_super, */ .sync_fs = nilfs_sync_fs, .freeze_fs = nilfs_freeze, .unfreeze_fs = nilfs_unfreeze, .statfs = nilfs_statfs, .remount_fs = nilfs_remount, - /* .umount_begin */ .show_options = nilfs_show_options }; diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 6eee4177807b..be1267a34cea 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -107,8 +107,6 @@ struct the_nilfs { * used for * - loading the latest checkpoint exclusively. * - allocating a new full segment. - * - protecting s_dirt in the super_block struct - * (see nilfs_write_super) and the following fields. */ struct buffer_head *ns_sbh[2]; struct nilfs_super_block *ns_sbp[2]; diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 7389d2d5e51d..1ecf46448f85 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2084,7 +2084,6 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, if (err) return err; pos = *ppos; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim. 
*/ current->backing_dev_info = mapping->backing_dev_info; written = 0; @@ -2119,6 +2118,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, BUG_ON(iocb->ki_pos != pos); + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); @@ -2127,6 +2127,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err < 0) ret = err; } + sb_end_write(inode->i_sb); return ret; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 7602783d7f41..46a1f6d75104 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1971,6 +1971,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd, { struct inode *inode = file->f_path.dentry->d_inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int ret; if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) && !ocfs2_writes_unwritten_extents(osb)) @@ -1985,7 +1986,12 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd, if (!(file->f_mode & FMODE_WRITE)) return -EBADF; - return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); + ret = mnt_want_write_file(file); + if (ret) + return ret; + ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); + mnt_drop_write_file(file); + return ret; } static long ocfs2_fallocate(struct file *file, int mode, loff_t offset, @@ -2261,7 +2267,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, if (iocb->ki_left == 0) return 0; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + sb_start_write(inode->i_sb); appending = file->f_flags & O_APPEND ? 1 : 0; direct_io = file->f_flags & O_DIRECT ? 1 : 0; @@ -2436,6 +2442,7 @@ out_sems: ocfs2_iocb_clear_sem_locked(iocb); mutex_unlock(&inode->i_mutex); + sb_end_write(inode->i_sb); if (written) ret = written; diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index d96f7f81d8dd..f20edcbfe700 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -928,7 +928,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (get_user(new_clusters, (int __user *)arg)) return -EFAULT; - return ocfs2_group_extend(inode, new_clusters); + status = mnt_want_write_file(filp); + if (status) + return status; + status = ocfs2_group_extend(inode, new_clusters); + mnt_drop_write_file(filp); + return status; case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: if (!capable(CAP_SYS_RESOURCE)) @@ -937,7 +942,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&input, (int __user *) arg, sizeof(input))) return -EFAULT; - return ocfs2_group_add(inode, &input); + status = mnt_want_write_file(filp); + if (status) + return status; + status = ocfs2_group_add(inode, &input); + mnt_drop_write_file(filp); + return status; case OCFS2_IOC_REFLINK: if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 0a42ae96dca7..2dd36af79e26 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -355,11 +355,14 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) if (journal_current_handle()) return jbd2_journal_start(journal, max_buffs); + sb_start_intwrite(osb->sb); + down_read(&osb->journal->j_trans_barrier); handle = jbd2_journal_start(journal, max_buffs); if (IS_ERR(handle)) { up_read(&osb->journal->j_trans_barrier); + sb_end_intwrite(osb->sb); mlog_errno(PTR_ERR(handle)); @@ -388,8 +391,10 @@ int ocfs2_commit_trans(struct ocfs2_super *osb, if (ret < 
0) mlog_errno(ret); - if (!nested) + if (!nested) { up_read(&journal->j_trans_barrier); + sb_end_intwrite(osb->sb); + } return ret; } diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 9cd41083e991..d150372fd81d 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -136,6 +136,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) sigset_t oldset; int ret; + sb_start_pagefault(inode->i_sb); ocfs2_block_signals(&oldset); /* @@ -165,6 +166,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) out: ocfs2_unblock_signals(&oldset); + sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9f32d7cbb7a3..30a055049e16 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4466,20 +4466,11 @@ int ocfs2_reflink_ioctl(struct inode *inode, goto out_dput; } - error = mnt_want_write(new_path.mnt); - if (error) { - mlog_errno(error); - goto out_dput; - } - error = ocfs2_vfs_reflink(old_path.dentry, new_path.dentry->d_inode, new_dentry, preserve); - mnt_drop_write(new_path.mnt); out_dput: - dput(new_dentry); - mutex_unlock(&new_path.dentry->d_inode->i_mutex); - path_put(&new_path); + done_path_create(&new_path, new_dentry); out: path_put(&old_path); diff --git a/fs/open.c b/fs/open.c index 1e914b397e12..e1f2cdb91a4d 100644 --- a/fs/open.c +++ b/fs/open.c @@ -164,11 +164,13 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) if (IS_APPEND(inode)) goto out_putf; + sb_start_write(inode->i_sb); error = locks_verify_truncate(inode, file, length); if (!error) error = security_path_truncate(&file->f_path); if (!error) error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file); + sb_end_write(inode->i_sb); out_putf: fput(file); out: @@ -266,7 +268,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (!file->f_op->fallocate) return -EOPNOTSUPP; - return file->f_op->fallocate(file, mode, offset, len); + sb_start_write(inode->i_sb); + ret = file->f_op->fallocate(file, mode, offset, len); + sb_end_write(inode->i_sb); + return ret; } SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) @@ -620,7 +625,7 @@ static inline int __get_file_write_access(struct inode *inode, /* * Balanced in __fput() */ - error = mnt_want_write(mnt); + error = __mnt_want_write(mnt); if (error) put_write_access(inode); } @@ -654,6 +659,7 @@ static int do_dentry_open(struct file *f, if (unlikely(f->f_flags & O_PATH)) f->f_mode = FMODE_PATH; + path_get(&f->f_path); inode = f->f_path.dentry->d_inode; if (f->f_mode & FMODE_WRITE) { error = __get_file_write_access(inode, f->f_path.mnt); @@ -711,7 +717,7 @@ cleanup_all: * here, so just reset the state. 
*/ file_reset_write(f); - mnt_drop_write(f->f_path.mnt); + __mnt_drop_write(f->f_path.mnt); } } cleanup_file: @@ -739,9 +745,7 @@ int finish_open(struct file *file, struct dentry *dentry, int error; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ - mntget(file->f_path.mnt); - file->f_path.dentry = dget(dentry); - + file->f_path.dentry = dentry; error = do_dentry_open(file, open, current_cred()); if (!error) *opened |= FILE_OPENED; @@ -784,7 +788,6 @@ struct file *dentry_open(const struct path *path, int flags, f->f_flags = flags; f->f_path = *path; - path_get(&f->f_path); error = do_dentry_open(f, NULL, cred); if (!error) { error = open_check_o_direct(f); @@ -849,9 +852,10 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o int lookup_flags = 0; int acc_mode; - if (!(flags & O_CREAT)) - mode = 0; - op->mode = mode; + if (flags & O_CREAT) + op->mode = (mode & S_IALLUGO) | S_IFREG; + else + op->mode = 0; /* Must never be set by userspace */ flags &= ~FMODE_NONOTIFY; diff --git a/fs/pipe.c b/fs/pipe.c index 95cbd6b227e6..8d85d7068c1e 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1016,18 +1016,16 @@ fail_inode: return NULL; } -struct file *create_write_pipe(int flags) +int create_pipe_files(struct file **res, int flags) { int err; - struct inode *inode; + struct inode *inode = get_pipe_inode(); struct file *f; struct path path; - struct qstr name = { .name = "" }; + static struct qstr name = { .name = "" }; - err = -ENFILE; - inode = get_pipe_inode(); if (!inode) - goto err; + return -ENFILE; err = -ENOMEM; path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name); @@ -1041,62 +1039,43 @@ struct file *create_write_pipe(int flags) f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops); if (!f) goto err_dentry; - f->f_mapping = inode->i_mapping; f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)); - f->f_version = 0; - return f; + res[0] = alloc_file(&path, FMODE_READ, &read_pipefifo_fops); + if (!res[0]) + goto err_file; + + path_get(&path); + res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK); + res[1] = f; + return 0; - err_dentry: +err_file: + put_filp(f); +err_dentry: free_pipe_info(inode); path_put(&path); - return ERR_PTR(err); + return err; - err_inode: +err_inode: free_pipe_info(inode); iput(inode); - err: - return ERR_PTR(err); -} - -void free_write_pipe(struct file *f) -{ - free_pipe_info(f->f_dentry->d_inode); - path_put(&f->f_path); - put_filp(f); -} - -struct file *create_read_pipe(struct file *wrf, int flags) -{ - /* Grab pipe from the writer */ - struct file *f = alloc_file(&wrf->f_path, FMODE_READ, - &read_pipefifo_fops); - if (!f) - return ERR_PTR(-ENFILE); - - path_get(&wrf->f_path); - f->f_flags = O_RDONLY | (flags & O_NONBLOCK); - - return f; + return err; } int do_pipe_flags(int *fd, int flags) { - struct file *fw, *fr; + struct file *files[2]; int error; int fdw, fdr; if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) return -EINVAL; - fw = create_write_pipe(flags); - if (IS_ERR(fw)) - return PTR_ERR(fw); - fr = create_read_pipe(fw, flags); - error = PTR_ERR(fr); - if (IS_ERR(fr)) - goto err_write_pipe; + error = create_pipe_files(files, flags); + if (error) + return error; error = get_unused_fd_flags(flags); if (error < 0) @@ -1109,8 +1088,8 @@ int do_pipe_flags(int *fd, int flags) fdw = error; audit_fd_pair(fdr, fdw); - fd_install(fdr, fr); - fd_install(fdw, fw); + fd_install(fdr, files[0]); + fd_install(fdw, files[1]); fd[0] = fdr; fd[1] = fdw; @@ -1119,10 +1098,8 @@ int do_pipe_flags(int *fd, int flags) err_fdr: 
put_unused_fd(fdr); err_read_pipe: - path_put(&fr->f_path); - put_filp(fr); - err_write_pipe: - free_write_pipe(fw); + fput(files[0]); + fput(files[1]); return error; } diff --git a/fs/splice.c b/fs/splice.c index 7bf08fa22ec9..41514dd89462 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -996,6 +996,8 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, }; ssize_t ret; + sb_start_write(inode->i_sb); + pipe_lock(pipe); splice_from_pipe_begin(&sd); @@ -1034,6 +1036,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, *ppos += ret; balance_dirty_pages_ratelimited_nr(mapping, nr_pages); } + sb_end_write(inode->i_sb); return ret; } diff --git a/fs/super.c b/fs/super.c index 4bf714459a4b..0902cfa6a12e 100644 --- a/fs/super.c +++ b/fs/super.c @@ -33,12 +33,19 @@ #include <linux/rculist_bl.h> #include <linux/cleancache.h> #include <linux/fsnotify.h> +#include <linux/lockdep.h> #include "internal.h" LIST_HEAD(super_blocks); DEFINE_SPINLOCK(sb_lock); +static char *sb_writers_name[SB_FREEZE_LEVELS] = { + "sb_writers", + "sb_pagefaults", + "sb_internal", +}; + /* * One thing we have to be careful of with a per-sb shrinker is that we don't * drop the last active reference to the superblock from within the shrinker. @@ -102,6 +109,35 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc) return total_objects; } +static int init_sb_writers(struct super_block *s, struct file_system_type *type) +{ + int err; + int i; + + for (i = 0; i < SB_FREEZE_LEVELS; i++) { + err = percpu_counter_init(&s->s_writers.counter[i], 0); + if (err < 0) + goto err_out; + lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i], + &type->s_writers_key[i], 0); + } + init_waitqueue_head(&s->s_writers.wait); + init_waitqueue_head(&s->s_writers.wait_unfrozen); + return 0; +err_out: + while (--i >= 0) + percpu_counter_destroy(&s->s_writers.counter[i]); + return err; +} + +static void destroy_sb_writers(struct super_block *s) +{ + int i; + + for (i = 0; i < SB_FREEZE_LEVELS; i++) + percpu_counter_destroy(&s->s_writers.counter[i]); +} + /** * alloc_super - create new superblock * @type: filesystem type superblock should belong to @@ -117,18 +153,19 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) if (s) { if (security_sb_alloc(s)) { + /* + * We cannot call security_sb_free() without + * security_sb_alloc() succeeding. 
So bail out manually + */ kfree(s); s = NULL; goto out; } #ifdef CONFIG_SMP s->s_files = alloc_percpu(struct list_head); - if (!s->s_files) { - security_sb_free(s); - kfree(s); - s = NULL; - goto out; - } else { + if (!s->s_files) + goto err_out; + else { int i; for_each_possible_cpu(i) @@ -137,6 +174,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) #else INIT_LIST_HEAD(&s->s_files); #endif + if (init_sb_writers(s, type)) + goto err_out; s->s_flags = flags; s->s_bdi = &default_backing_dev_info; INIT_HLIST_NODE(&s->s_instances); @@ -178,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) mutex_init(&s->s_dquot.dqio_mutex); mutex_init(&s->s_dquot.dqonoff_mutex); init_rwsem(&s->s_dquot.dqptr_sem); - init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; @@ -190,6 +228,16 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) } out: return s; +err_out: + security_sb_free(s); +#ifdef CONFIG_SMP + if (s->s_files) + free_percpu(s->s_files); +#endif + destroy_sb_writers(s); + kfree(s); + s = NULL; + goto out; } /** @@ -203,6 +251,7 @@ static inline void destroy_super(struct super_block *s) #ifdef CONFIG_SMP free_percpu(s->s_files); #endif + destroy_sb_writers(s); security_sb_free(s); WARN_ON(!list_empty(&s->s_mounts)); kfree(s->s_subtype); @@ -488,46 +537,6 @@ void drop_super(struct super_block *sb) EXPORT_SYMBOL(drop_super); /** - * sync_supers - helper for periodic superblock writeback - * - * Call the write_super method if present on all dirty superblocks in - * the system. This is for the periodic writeback used by most older - * filesystems. For data integrity superblock writeback use - * sync_filesystems() instead. - * - * Note: check the dirty flag before waiting, so we don't - * hold up the sync while mounting a device. (The newly - * mounted device won't need syncing.) - */ -void sync_supers(void) -{ - struct super_block *sb, *p = NULL; - - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (hlist_unhashed(&sb->s_instances)) - continue; - if (sb->s_op->write_super && sb->s_dirt) { - sb->s_count++; - spin_unlock(&sb_lock); - - down_read(&sb->s_umount); - if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN)) - sb->s_op->write_super(sb); - up_read(&sb->s_umount); - - spin_lock(&sb_lock); - if (p) - __put_super(p); - p = sb; - } - } - if (p) - __put_super(p); - spin_unlock(&sb_lock); -} - -/** * iterate_supers - call function for all active superblocks * @f: function to call * @arg: argument to pass to it @@ -651,10 +660,11 @@ struct super_block *get_super_thawed(struct block_device *bdev) { while (1) { struct super_block *s = get_super(bdev); - if (!s || s->s_frozen == SB_UNFROZEN) + if (!s || s->s_writers.frozen == SB_UNFROZEN) return s; up_read(&s->s_umount); - vfs_check_frozen(s, SB_FREEZE_WRITE); + wait_event(s->s_writers.wait_unfrozen, + s->s_writers.frozen == SB_UNFROZEN); put_super(s); } } @@ -732,7 +742,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) int retval; int remount_ro; - if (sb->s_frozen != SB_UNFROZEN) + if (sb->s_writers.frozen != SB_UNFROZEN) return -EBUSY; #ifdef CONFIG_BLOCK @@ -1163,6 +1173,120 @@ out: return ERR_PTR(error); } +/* + * This is an internal function, please use sb_end_{write,pagefault,intwrite} + * instead. 
+ */ +void __sb_end_write(struct super_block *sb, int level) +{ + percpu_counter_dec(&sb->s_writers.counter[level-1]); + /* + * Make sure s_writers are updated before we wake up waiters in + * freeze_super(). + */ + smp_mb(); + if (waitqueue_active(&sb->s_writers.wait)) + wake_up(&sb->s_writers.wait); + rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_); +} +EXPORT_SYMBOL(__sb_end_write); + +#ifdef CONFIG_LOCKDEP +/* + * We want lockdep to tell us about possible deadlocks with freezing but + * it's a bit tricky to properly instrument it. Getting freeze protection + * works like taking a read lock, but there are subtle problems. XFS for example + * gets freeze protection on the internal level twice in some cases, which is OK + * only because we already hold freeze protection on a higher level as well. Due + * to these cases we have to tell lockdep we are doing a trylock when we + * already hold freeze protection for a higher freeze level. + */ +static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock, + unsigned long ip) +{ + int i; + + if (!trylock) { + for (i = 0; i < level - 1; i++) + if (lock_is_held(&sb->s_writers.lock_map[i])) { + trylock = true; + break; + } + } + rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip); +} +#endif + +/* + * This is an internal function, please use sb_start_{write,pagefault,intwrite} + * instead. + */ +int __sb_start_write(struct super_block *sb, int level, bool wait) +{ +retry: + if (unlikely(sb->s_writers.frozen >= level)) { + if (!wait) + return 0; + wait_event(sb->s_writers.wait_unfrozen, + sb->s_writers.frozen < level); + } + +#ifdef CONFIG_LOCKDEP + acquire_freeze_lock(sb, level, !wait, _RET_IP_); +#endif + percpu_counter_inc(&sb->s_writers.counter[level-1]); + /* + * Make sure counter is updated before we check for frozen. + * freeze_super() first sets frozen and then checks the counter. + */ + smp_mb(); + if (unlikely(sb->s_writers.frozen >= level)) { + __sb_end_write(sb, level); + goto retry; + } + return 1; +} +EXPORT_SYMBOL(__sb_start_write); + +/** + * sb_wait_write - wait until all writers to a given file system finish + * @sb: the super for which we wait + * @level: type of writers we wait for (normal vs page fault) + * + * This function waits until there are no writers of the given type to the given + * file system. The caller of this function should make sure there can be no new + * writers of type @level before calling this function. Otherwise this function + * can livelock. + */ +static void sb_wait_write(struct super_block *sb, int level) +{ + s64 writers; + + /* + * We just cycle through lockdep here so that it does not complain + * about returning to userspace with a lock held + */ + rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_); + rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_); + + do { + DEFINE_WAIT(wait); + + /* + * We use a barrier in prepare_to_wait() to separate setting + * of frozen and checking of the counter + */ + prepare_to_wait(&sb->s_writers.wait, &wait, + TASK_UNINTERRUPTIBLE); + + writers = percpu_counter_sum(&sb->s_writers.counter[level-1]); + if (writers) + schedule(); + + finish_wait(&sb->s_writers.wait, &wait); + } while (writers); +} + /** * freeze_super - lock the filesystem and force it into a consistent state * @sb: the super to lock @@ -1170,6 +1294,31 @@ out: * Syncs the super to make sure the filesystem is consistent and calls the fs's * freeze_fs. Subsequent calls to this without first thawing the fs will return * -EBUSY.
+ * + * During this function, sb->s_writers.frozen goes through these values: + * + * SB_UNFROZEN: File system is normal, all writes progress as usual. + * + * SB_FREEZE_WRITE: The file system is in the process of being frozen. New + * writes should be blocked, though page faults are still allowed. We wait for + * all writes to complete and then proceed to the next stage. + * + * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked + * but internal fs threads can still modify the filesystem (although they + * should not dirty new pages or inodes), writeback can run etc. After waiting + * for all running page faults we sync the filesystem which will clean all + * dirty pages and inodes (no new dirty pages or inodes can be created when + * sync is running). + * + * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs + * modification are blocked (e.g. XFS preallocation truncation on inode + * reclaim). This is usually implemented by blocking new transactions for + * filesystems that have them and need this additional guard. After all + * internal writers are finished we call ->freeze_fs() to finish filesystem + * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is + * mostly auxiliary for filesystems to verify they do not modify frozen fs. + * + * sb->s_writers.frozen is protected by sb->s_umount. */ int freeze_super(struct super_block *sb) { @@ -1177,7 +1326,7 @@ int freeze_super(struct super_block *sb) atomic_inc(&sb->s_active); down_write(&sb->s_umount); - if (sb->s_frozen) { + if (sb->s_writers.frozen != SB_UNFROZEN) { deactivate_locked_super(sb); return -EBUSY; } @@ -1188,33 +1337,53 @@ int freeze_super(struct super_block *sb) } if (sb->s_flags & MS_RDONLY) { - sb->s_frozen = SB_FREEZE_TRANS; - smp_wmb(); + /* Nothing to do really... */ + sb->s_writers.frozen = SB_FREEZE_COMPLETE; up_write(&sb->s_umount); return 0; } - sb->s_frozen = SB_FREEZE_WRITE; + /* From now on, no new normal writers can start */ + sb->s_writers.frozen = SB_FREEZE_WRITE; smp_wmb(); + /* Release s_umount to preserve sb_start_write -> s_umount ordering */ + up_write(&sb->s_umount); + + sb_wait_write(sb, SB_FREEZE_WRITE); + + /* Now we go and block page faults... */ + down_write(&sb->s_umount); + sb->s_writers.frozen = SB_FREEZE_PAGEFAULT; + smp_wmb(); + + sb_wait_write(sb, SB_FREEZE_PAGEFAULT); + + /* All writers are done so after syncing there won't be dirty data */ sync_filesystem(sb); - sb->s_frozen = SB_FREEZE_TRANS; + /* Now wait for internal filesystem counter */ + sb->s_writers.frozen = SB_FREEZE_FS; smp_wmb(); + sb_wait_write(sb, SB_FREEZE_FS); - sync_blockdev(sb->s_bdev); if (sb->s_op->freeze_fs) { ret = sb->s_op->freeze_fs(sb); if (ret) { printk(KERN_ERR "VFS:Filesystem freeze failed\n"); - sb->s_frozen = SB_UNFROZEN; + sb->s_writers.frozen = SB_UNFROZEN; smp_wmb(); - wake_up(&sb->s_wait_unfrozen); + wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return ret; } } + /* + * This is just for debugging purposes so that fs can warn if it + * sees write activity when frozen is set to SB_FREEZE_COMPLETE. 
+ */ + sb->s_writers.frozen = SB_FREEZE_COMPLETE; up_write(&sb->s_umount); return 0; } @@ -1231,7 +1400,7 @@ int thaw_super(struct super_block *sb) int error; down_write(&sb->s_umount); - if (sb->s_frozen == SB_UNFROZEN) { + if (sb->s_writers.frozen == SB_UNFROZEN) { up_write(&sb->s_umount); return -EINVAL; } @@ -1244,16 +1413,15 @@ int thaw_super(struct super_block *sb) if (error) { printk(KERN_ERR "VFS:Filesystem thaw failed\n"); - sb->s_frozen = SB_FREEZE_TRANS; up_write(&sb->s_umount); return error; } } out: - sb->s_frozen = SB_UNFROZEN; + sb->s_writers.frozen = SB_UNFROZEN; smp_wmb(); - wake_up(&sb->s_wait_unfrozen); + wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return 0; diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c index a4759833d62d..614b2b544880 100644 --- a/fs/sysfs/bin.c +++ b/fs/sysfs/bin.c @@ -228,6 +228,8 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ret = 0; if (bb->vm_ops->page_mkwrite) ret = bb->vm_ops->page_mkwrite(vma, vmf); + else + file_update_time(file); sysfs_put_active(attr_sd); return ret; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 35389ca2d267..7bd6e72afd11 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -37,11 +37,11 @@ * * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we * implement. However, this is not true for 'ubifs_writepage()', which may be - * called with @i_mutex unlocked. For example, when pdflush is doing background - * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal" - * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the - * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()' - * we are only guaranteed that the page is locked. + * called with @i_mutex unlocked. For example, when flusher thread is doing + * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. + * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. + * in the "sys_write -> alloc_pages -> direct reclaim path". So, in + * 'ubifs_writepage()' we are only guaranteed that the page is locked. * * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 1c766c39c038..c3fa6c5327a3 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc) mutex_lock(&ui->ui_mutex); /* * Due to races between write-back forced by budgeting - * (see 'sync_some_inodes()') and pdflush write-back, the inode may + * (see 'sync_some_inodes()') and background write-back, the inode may * have already been synchronized, do not do this again. This might * also happen if it was synchronized in an VFS operation, e.g. * 'ubifs_link()'. diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 15052ff916ec..e562dd43f41f 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -124,6 +124,12 @@ xfs_setfilesize_trans_alloc( ioend->io_append_trans = tp; /* + * We will pass freeze protection with a transaction. So tell lockdep + * we released it. + */ + rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 1, _THIS_IP_); + /* * We hand off the transaction to the completion thread now, so * clear the flag here. 
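freeze_super() and thaw_super() above are normally driven from user space through the FIFREEZE and FITHAW ioctls (this is what fsfreeze(8) and snapshot tools use). A small, runnable illustration of that entry point; it needs CAP_SYS_ADMIN, and error handling is trimmed:

  /* freeze_demo.c - freeze and immediately thaw a mounted filesystem */
  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fs.h>           /* FIFREEZE, FITHAW */

  int main(int argc, char **argv)
  {
          int fd;

          if (argc != 2) {
                  fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                  return 1;
          }

          fd = open(argv[1], O_RDONLY | O_DIRECTORY);
          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          /* Ends up in freeze_super(); new writers block in sb_start_write(). */
          if (ioctl(fd, FIFREEZE, 0) < 0)
                  perror("FIFREEZE");
          else if (ioctl(fd, FITHAW, 0) < 0)      /* thaw_super() */
                  perror("FITHAW");

          close(fd);
          return 0;
  }
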
*/ @@ -199,6 +205,15 @@ xfs_end_io( struct xfs_inode *ip = XFS_I(ioend->io_inode); int error = 0; + if (ioend->io_append_trans) { + /* + * We've got freeze protection passed with the transaction. + * Tell lockdep about it. + */ + rwsem_acquire_read( + &ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 0, 1, _THIS_IP_); + } if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { ioend->io_error = -EIO; goto done; @@ -1425,6 +1440,9 @@ out_trans_cancel: if (ioend->io_append_trans) { current_set_flags_nested(&ioend->io_append_trans->t_pflags, PF_FSTRANS); + rwsem_acquire_read( + &inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], + 0, 1, _THIS_IP_); xfs_trans_cancel(ioend->io_append_trans, 0); } out_destroy_ioend: diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index c4559c6e6f2c..56afcdb2377d 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -770,10 +770,12 @@ xfs_file_aio_write( if (ocount == 0) return 0; - xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); + sb_start_write(inode->i_sb); - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return -EIO; + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + ret = -EIO; + goto out; + } if (unlikely(file->f_flags & O_DIRECT)) ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount); @@ -792,6 +794,8 @@ xfs_file_aio_write( ret = err; } +out: + sb_end_write(inode->i_sb); return ret; } diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 1f1535d25a9b..0e0232c3b6d9 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -364,9 +364,15 @@ xfs_fssetdm_by_handle( if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(parfilp); + if (error) + return error; + dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); - if (IS_ERR(dentry)) + if (IS_ERR(dentry)) { + mnt_drop_write_file(parfilp); return PTR_ERR(dentry); + } if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { error = -XFS_ERROR(EPERM); @@ -382,6 +388,7 @@ xfs_fssetdm_by_handle( fsd.fsd_dmstate); out: + mnt_drop_write_file(parfilp); dput(dentry); return error; } @@ -634,7 +641,11 @@ xfs_ioc_space( if (ioflags & IO_INVIS) attr_flags |= XFS_ATTR_DMI; + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags); + mnt_drop_write_file(filp); return -error; } @@ -1163,6 +1174,7 @@ xfs_ioc_fssetxattr( { struct fsxattr fa; unsigned int mask; + int error; if (copy_from_user(&fa, arg, sizeof(fa))) return -EFAULT; @@ -1171,7 +1183,12 @@ xfs_ioc_fssetxattr( if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) mask |= FSX_NONBLOCK; - return -xfs_ioctl_setattr(ip, &fa, mask); + error = mnt_want_write_file(filp); + if (error) + return error; + error = xfs_ioctl_setattr(ip, &fa, mask); + mnt_drop_write_file(filp); + return -error; } STATIC int @@ -1196,6 +1213,7 @@ xfs_ioc_setxflags( struct fsxattr fa; unsigned int flags; unsigned int mask; + int error; if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; @@ -1210,7 +1228,12 @@ xfs_ioc_setxflags( mask |= FSX_NONBLOCK; fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); - return -xfs_ioctl_setattr(ip, &fa, mask); + error = mnt_want_write_file(filp); + if (error) + return error; + error = xfs_ioctl_setattr(ip, &fa, mask); + mnt_drop_write_file(filp); + return -error; } STATIC int @@ -1385,8 +1408,13 @@ xfs_file_ioctl( if (copy_from_user(&dmi, arg, sizeof(dmi))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; + error = xfs_set_dmattrs(ip, 
dmi.fsd_dmevmask, dmi.fsd_dmstate); + mnt_drop_write_file(filp); return -error; } @@ -1434,7 +1462,11 @@ xfs_file_ioctl( if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_swapext(&sxp); + mnt_drop_write_file(filp); return -error; } @@ -1463,9 +1495,14 @@ xfs_file_ioctl( if (copy_from_user(&inout, arg, sizeof(inout))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; + /* input parameter is passed in resblks field of structure */ in = inout.resblks; error = xfs_reserve_blocks(mp, &in, &inout); + mnt_drop_write_file(filp); if (error) return -error; @@ -1496,7 +1533,11 @@ xfs_file_ioctl( if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_data(mp, &in); + mnt_drop_write_file(filp); return -error; } @@ -1506,7 +1547,11 @@ xfs_file_ioctl( if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_log(mp, &in); + mnt_drop_write_file(filp); return -error; } @@ -1516,7 +1561,11 @@ xfs_file_ioctl( if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_rt(mp, &in); + mnt_drop_write_file(filp); return -error; } diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index c4f2da0d2bf5..1244274a5674 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c @@ -600,7 +600,11 @@ xfs_file_compat_ioctl( if (xfs_compat_growfs_data_copyin(&in, arg)) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_data(mp, &in); + mnt_drop_write_file(filp); return -error; } case XFS_IOC_FSGROWFSRT_32: { @@ -608,7 +612,11 @@ xfs_file_compat_ioctl( if (xfs_compat_growfs_rt_copyin(&in, arg)) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_growfs_rt(mp, &in); + mnt_drop_write_file(filp); return -error; } #endif @@ -627,7 +635,11 @@ xfs_file_compat_ioctl( offsetof(struct xfs_swapext, sx_stat)) || xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) return -XFS_ERROR(EFAULT); + error = mnt_want_write_file(filp); + if (error) + return error; error = xfs_swapext(&sxp); + mnt_drop_write_file(filp); return -error; } case XFS_IOC_FSBULKSTAT_32: diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 915edf6639f0..973dff6ad935 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -680,9 +680,9 @@ xfs_iomap_write_unwritten( * the same inode that we complete here and might deadlock * on the iolock. 
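The xfs_ioctl.c and xfs_ioctl32.c hunks above all apply one rule: an ioctl that is going to modify the filesystem must take mnt_want_write_file() before doing the work and drop it afterwards (and, as of this series, that call also acquires freeze protection, which is why fs/open.c switches to the bare __mnt_want_write() earlier in this diff). The shape every converted ioctl ends up with, using illustrative names:

  #include <linux/fs.h>
  #include <linux/mount.h>

  static long example_modifying_ioctl(struct file *filp, unsigned long arg)
  {
          int error;

          error = mnt_want_write_file(filp);     /* write access + freeze protection */
          if (error)
                  return error;

          /* ... copy_from_user() the arguments and modify the filesystem ... */
          error = 0;      /* result of the actual operation would go here */

          mnt_drop_write_file(filp);
          return error;
  }
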
*/ - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); + sb_start_intwrite(mp->m_super); tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); - tp->t_flags |= XFS_TRANS_RESERVE; + tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 711ca51ca3d7..29c2f83d4147 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1551,7 +1551,7 @@ xfs_unmountfs( int xfs_fs_writable(xfs_mount_t *mp) { - return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) || + return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY)); } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 8724336a9a08..05a05a7b6119 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -311,9 +311,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, #define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */ #define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */ -#define xfs_test_for_freeze(mp) ((mp)->m_super->s_frozen) -#define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l)) - /* * Flags for xfs_mountfs */ diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index 97304f10e78a..96548176db80 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c @@ -403,7 +403,7 @@ xfs_sync_worker( if (!(mp->m_super->s_flags & MS_ACTIVE) && !(mp->m_flags & XFS_MOUNT_RDONLY)) { /* dgc: errors ignored here */ - if (mp->m_super->s_frozen == SB_UNFROZEN && + if (mp->m_super->s_writers.frozen == SB_UNFROZEN && xfs_log_need_covered(mp)) error = xfs_fs_log_dummy(mp); else diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index fdf324508c5e..06ed520a767f 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -576,8 +576,12 @@ xfs_trans_alloc( xfs_mount_t *mp, uint type) { - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); - return _xfs_trans_alloc(mp, type, KM_SLEEP); + xfs_trans_t *tp; + + sb_start_intwrite(mp->m_super); + tp = _xfs_trans_alloc(mp, type, KM_SLEEP); + tp->t_flags |= XFS_TRANS_FREEZE_PROT; + return tp; } xfs_trans_t * @@ -588,6 +592,7 @@ _xfs_trans_alloc( { xfs_trans_t *tp; + WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); atomic_inc(&mp->m_active_trans); tp = kmem_zone_zalloc(xfs_trans_zone, memflags); @@ -611,6 +616,8 @@ xfs_trans_free( xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false); atomic_dec(&tp->t_mountp->m_active_trans); + if (tp->t_flags & XFS_TRANS_FREEZE_PROT) + sb_end_intwrite(tp->t_mountp->m_super); xfs_trans_free_dqinfo(tp); kmem_zone_free(xfs_trans_zone, tp); } @@ -643,7 +650,11 @@ xfs_trans_dup( ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ASSERT(tp->t_ticket != NULL); - ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE); + ntp->t_flags = XFS_TRANS_PERM_LOG_RES | + (tp->t_flags & XFS_TRANS_RESERVE) | + (tp->t_flags & XFS_TRANS_FREEZE_PROT); + /* We gave our writer reference to the new transaction */ + tp->t_flags &= ~XFS_TRANS_FREEZE_PROT; ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket); ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used; tp->t_blk_res = tp->t_blk_res_used; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index bc2afd52a0b7..db056544cbb5 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -179,6 +179,8 @@ struct xfs_log_item_desc { #define XFS_TRANS_SYNC 0x08 /* make commit synchronous */ #define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */ #define 
XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */ +#define XFS_TRANS_FREEZE_PROT 0x40 /* Transaction has elevated writer + count in superblock */ /* * Values for call flags parameter. diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 2c744c7a5b3d..26a92fc28a59 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -491,11 +491,11 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); acpi_status acpi_enter_sleep_state_prep(u8 sleep_state); -acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags); +acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state); ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)) -acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags); +acpi_status acpi_leave_sleep_state_prep(u8 sleep_state); acpi_status acpi_leave_sleep_state(u8 sleep_state); diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 3af87de6a68c..3d00bd5bd7e3 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -803,7 +803,7 @@ typedef u8 acpi_adr_space_type; /* Sleep function dispatch */ -typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags); +typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state); struct acpi_sleep_functions { ACPI_SLEEP_FUNCTION legacy_function; diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index 580a6d35c700..c04e0db8a2d6 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h @@ -26,7 +26,13 @@ static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) { if (unlikely(atomic_xchg(count, 0) != 1)) - fail_fn(count); + /* + * We failed to acquire the lock, so mark it contended + * to ensure that any waiting tasks are woken up by the + * unlock slow path. 
+ */ + if (likely(atomic_xchg(count, -1) != 1)) + fail_fn(count); } /** @@ -43,7 +49,8 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_xchg(count, 0) != 1)) - return fail_fn(count); + if (likely(atomic_xchg(count, -1) != 1)) + return fail_fn(count); return 0; } diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 7ff5c99b1638..c78bb997e2c6 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -213,9 +213,12 @@ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 58056865b8e9..dc3a8cd7db8a 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -964,6 +964,8 @@ struct drm_radeon_cs { #define RADEON_INFO_IB_VM_MAX_SIZE 0x0f /* max pipes - needed for compute shaders */ #define RADEON_INFO_MAX_PIPES 0x10 +/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */ +#define RADEON_INFO_TIMESTAMP 0x11 struct drm_radeon_info { uint32_t request; diff --git a/include/linux/Kbuild b/include/linux/Kbuild index d9a754474878..fa217607c582 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -391,6 +391,7 @@ header-y += v4l2-dv-timings.h header-y += v4l2-mediabus.h header-y += v4l2-subdev.h header-y += veth.h +header-y += vfio.h header-y += vhost.h header-y += videodev2.h header-y += virtio_9p.h diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3ad510b25283..4f2a76224509 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -96,7 +96,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); void acpi_numa_slit_init (struct acpi_table_slit *slit); void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); -void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); +int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); void acpi_numa_arch_fixup(void); #ifdef CONFIG_ACPI_HOTPLUG_CPU diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 02549017212a..2a5f64a11b77 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -21,8 +21,9 @@ #include <linux/dmaengine.h> #include <linux/interrupt.h> -struct pl08x_lli; struct pl08x_driver_data; +struct pl08x_phy_chan; +struct pl08x_txd; /* Bitmasks for selecting AHB ports for DMA transfers */ enum { @@ -46,170 +47,29 @@ enum { * devices with 
static assignments * @muxval: a number usually used to poke into some mux regiser to * mux in the signal to this channel - * @cctl_opt: default options for the channel control register + * @cctl_memcpy: options for the channel control register for memcpy + * *** not used for slave channels *** * @addr: source/target address in physical memory for this DMA channel, * can be the address of a FIFO register for burst requests for example. * This can be left undefined if the PrimeCell API is used for configuring * this. - * @circular_buffer: whether the buffer passed in is circular and - * shall simply be looped round round (like a record baby round - * round round round) * @single: the device connected to this channel will request single DMA * transfers, not bursts. (Bursts are default.) * @periph_buses: the device connected to this channel is accessible via * these buses (use PL08X_AHB1 | PL08X_AHB2). */ struct pl08x_channel_data { - char *bus_id; + const char *bus_id; int min_signal; int max_signal; u32 muxval; - u32 cctl; + u32 cctl_memcpy; dma_addr_t addr; - bool circular_buffer; bool single; u8 periph_buses; }; /** - * Struct pl08x_bus_data - information of source or destination - * busses for a transfer - * @addr: current address - * @maxwidth: the maximum width of a transfer on this bus - * @buswidth: the width of this bus in bytes: 1, 2 or 4 - */ -struct pl08x_bus_data { - dma_addr_t addr; - u8 maxwidth; - u8 buswidth; -}; - -/** - * struct pl08x_phy_chan - holder for the physical channels - * @id: physical index to this channel - * @lock: a lock to use when altering an instance of this struct - * @signal: the physical signal (aka channel) serving this physical channel - * right now - * @serving: the virtual channel currently being served by this physical - * channel - * @locked: channel unavailable for the system, e.g. dedicated to secure - * world - */ -struct pl08x_phy_chan { - unsigned int id; - void __iomem *base; - spinlock_t lock; - int signal; - struct pl08x_dma_chan *serving; - bool locked; -}; - -/** - * struct pl08x_sg - structure containing data per sg - * @src_addr: src address of sg - * @dst_addr: dst address of sg - * @len: transfer len in bytes - * @node: node for txd's dsg_list - */ -struct pl08x_sg { - dma_addr_t src_addr; - dma_addr_t dst_addr; - size_t len; - struct list_head node; -}; - -/** - * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor - * @tx: async tx descriptor - * @node: node for txd list for channels - * @dsg_list: list of children sg's - * @direction: direction of transfer - * @llis_bus: DMA memory address (physical) start for the LLIs - * @llis_va: virtual memory address start for the LLIs - * @cctl: control reg values for current txd - * @ccfg: config reg values for current txd - */ -struct pl08x_txd { - struct dma_async_tx_descriptor tx; - struct list_head node; - struct list_head dsg_list; - enum dma_transfer_direction direction; - dma_addr_t llis_bus; - struct pl08x_lli *llis_va; - /* Default cctl value for LLIs */ - u32 cctl; - /* - * Settings to be put into the physical channel when we - * trigger this txd. Other registers are in llis_va[0]. 
- */ - u32 ccfg; -}; - -/** - * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel - * states - * @PL08X_CHAN_IDLE: the channel is idle - * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport - * channel and is running a transfer on it - * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport - * channel, but the transfer is currently paused - * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport - * channel to become available (only pertains to memcpy channels) - */ -enum pl08x_dma_chan_state { - PL08X_CHAN_IDLE, - PL08X_CHAN_RUNNING, - PL08X_CHAN_PAUSED, - PL08X_CHAN_WAITING, -}; - -/** - * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel - * @chan: wrappped abstract channel - * @phychan: the physical channel utilized by this channel, if there is one - * @phychan_hold: if non-zero, hold on to the physical channel even if we - * have no pending entries - * @tasklet: tasklet scheduled by the IRQ to handle actual work etc - * @name: name of channel - * @cd: channel platform data - * @runtime_addr: address for RX/TX according to the runtime config - * @runtime_direction: current direction of this channel according to - * runtime config - * @pend_list: queued transactions pending on this channel - * @at: active transaction on this channel - * @lock: a lock for this channel data - * @host: a pointer to the host (internal use) - * @state: whether the channel is idle, paused, running etc - * @slave: whether this channel is a device (slave) or for memcpy - * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave - * channels. Fill with 'true' if peripheral should be flow controller. Direction - * will be selected at Runtime. - * @waiting: a TX descriptor on this channel which is waiting for a physical - * channel to become available - */ -struct pl08x_dma_chan { - struct dma_chan chan; - struct pl08x_phy_chan *phychan; - int phychan_hold; - struct tasklet_struct tasklet; - char *name; - const struct pl08x_channel_data *cd; - dma_addr_t src_addr; - dma_addr_t dst_addr; - u32 src_cctl; - u32 dst_cctl; - enum dma_transfer_direction runtime_direction; - struct list_head pend_list; - struct pl08x_txd *at; - spinlock_t lock; - struct pl08x_driver_data *host; - enum pl08x_dma_chan_state state; - bool slave; - bool device_fc; - struct pl08x_txd *waiting; -}; - -/** * struct pl08x_platform_data - the platform configuration for the PL08x * PrimeCells. 
* @slave_channels: the channels defined for the different devices on the @@ -229,8 +89,8 @@ struct pl08x_platform_data { const struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; struct pl08x_channel_data memcpy_channel; - int (*get_signal)(struct pl08x_dma_chan *); - void (*put_signal)(struct pl08x_dma_chan *); + int (*get_signal)(const struct pl08x_channel_data *); + void (*put_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; u8 mem_buses; }; diff --git a/include/linux/audit.h b/include/linux/audit.h index 22f292a917a3..36abf2aa7e68 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -130,6 +130,7 @@ #define AUDIT_LAST_KERN_ANOM_MSG 1799 #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ +#define AUDIT_ANOM_LINK 1702 /* Suspicious use of file links */ #define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ #define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ #define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ @@ -687,6 +688,8 @@ extern void audit_log_d_path(struct audit_buffer *ab, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); +extern void audit_log_link_denied(const char *operation, + struct path *link); extern void audit_log_lost(const char *message); #ifdef CONFIG_SECURITY extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); @@ -716,6 +719,7 @@ extern int audit_enabled; #define audit_log_untrustedstring(a,s) do { ; } while (0) #define audit_log_d_path(b, p, d) do { ; } while (0) #define audit_log_key(b, k) do { ; } while (0) +#define audit_log_link_denied(o, l) do { ; } while (0) #define audit_log_secctx(b,s) do { ; } while (0) #define audit_enabled 0 #endif diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index c97c6b9cd38e..2a9a9abc9126 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -124,7 +124,6 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, void bdi_start_background_writeback(struct backing_dev_info *bdi); int bdi_writeback_thread(void *data); int bdi_has_dirty_io(struct backing_dev_info *bdi); -void bdi_arm_supers_timer(void); void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 3c80885fa829..d323a4b4143c 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -89,6 +89,12 @@ #define BCMA_CC_CHIPST_4313_OTP_PRESENT 2 #define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2 #define BCMA_CC_CHIPST_4331_OTP_PRESENT 4 +#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001 +#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002 +#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004 +#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008 +#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010 +#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020 #define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */ #define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */ #define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 07954b05b86c..4e72a9d48232 100644 --- 
a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -46,16 +46,23 @@ struct blkcg_gq; struct request; typedef void (rq_end_io_fn)(struct request *, int); +#define BLK_RL_SYNCFULL (1U << 0) +#define BLK_RL_ASYNCFULL (1U << 1) + struct request_list { + struct request_queue *q; /* the queue this rl belongs to */ +#ifdef CONFIG_BLK_CGROUP + struct blkcg_gq *blkg; /* blkg this request pool belongs to */ +#endif /* * count[], starved[], and wait[] are indexed by * BLK_RW_SYNC/BLK_RW_ASYNC */ - int count[2]; - int starved[2]; - int elvpriv; - mempool_t *rq_pool; - wait_queue_head_t wait[2]; + int count[2]; + int starved[2]; + mempool_t *rq_pool; + wait_queue_head_t wait[2]; + unsigned int flags; }; /* @@ -138,6 +145,7 @@ struct request { struct hd_struct *part; unsigned long start_time; #ifdef CONFIG_BLK_CGROUP + struct request_list *rl; /* rl this rq is alloced from */ unsigned long long start_time_ns; unsigned long long io_start_time_ns; /* when passed to hardware */ #endif @@ -282,11 +290,16 @@ struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; + int nr_rqs[2]; /* # allocated [a]sync rqs */ + int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ /* - * the queue request freelist, one for reads and one for writes + * If blkcg is not used, @q->root_rl serves all requests. If blkcg + * is used, root blkg allocates from @q->root_rl and all other + * blkgs from their own blkg->rl. Which one to use should be + * determined using bio_request_list(). */ - struct request_list rq; + struct request_list root_rl; request_fn_proc *request_fn; make_request_fn *make_request_fn; @@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq) return rw_is_sync(rq->cmd_flags); } -static inline int blk_queue_full(struct request_queue *q, int sync) +static inline bool blk_rl_full(struct request_list *rl, bool sync) { - if (sync) - return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); - return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + return rl->flags & flag; } -static inline void blk_set_queue_full(struct request_queue *q, int sync) +static inline void blk_set_rl_full(struct request_list *rl, bool sync) { - if (sync) - queue_flag_set(QUEUE_FLAG_SYNCFULL, q); - else - queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); + unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags |= flag; } -static inline void blk_clear_queue_full(struct request_queue *q, int sync) +static inline void blk_clear_rl_full(struct request_list *rl, bool sync) { - if (sync) - queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); - else - queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); + unsigned int flag = sync ? 
BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; + + rl->flags &= ~flag; } @@ -911,11 +922,15 @@ struct blk_plug { }; #define BLK_MAX_REQUEST_COUNT 16 +struct blk_plug_cb; +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); struct blk_plug_cb { struct list_head list; - void (*callback)(struct blk_plug_cb *); + blk_plug_cb_fn callback; + void *data; }; - +extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, + void *data, int size); extern void blk_start_plug(struct blk_plug *); extern void blk_finish_plug(struct blk_plug *); extern void blk_flush_plug_list(struct blk_plug *, bool); diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h index faf8a45af210..a8519446c111 100644 --- a/include/linux/blkpg.h +++ b/include/linux/blkpg.h @@ -40,6 +40,7 @@ struct blkpg_ioctl_arg { /* The subfunctions (for the op field) */ #define BLKPG_ADD_PARTITION 1 #define BLKPG_DEL_PARTITION 2 +#define BLKPG_RESIZE_PARTITION 3 /* Sizes of name fields. Unused at present. */ #define BLKPG_DEVNAMELTH 64 diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index f55ab8cdc106..4d0fb3df2f4a 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result, int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, bsg_job_fn *job_fn, int dd_job_size); void bsg_request_fn(struct request_queue *q); -void bsg_remove_queue(struct request_queue *q); void bsg_goose_queue(struct request_queue *q); #endif diff --git a/include/linux/can.h b/include/linux/can.h index 018055efc034..e52958d7c2d1 100644 --- a/include/linux/can.h +++ b/include/linux/can.h @@ -74,20 +74,21 @@ struct can_frame { /* * defined bits for canfd_frame.flags * - * As the default for CAN FD should be to support the high data rate in the - * payload section of the frame (HDR) and to support up to 64 byte in the - * data section (EDL) the bits are only set in the non-default case. - * Btw. as long as there's no real implementation for CAN FD network driver - * these bits are only preliminary. + * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to + * be set in the CAN frame bitstream on the wire. The EDL bit switch turns + * the CAN controllers bitstream processor into the CAN FD mode which creates + * two new options within the CAN FD frame specification: * - * RX: NOHDR/NOEDL - info about received CAN FD frame - * ESI - bit from originating CAN controller - * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller - * ESI - bit is set by local CAN controller + * Bit Rate Switch - to indicate a second bitrate is/was used for the payload + * Error State Indicator - represents the error state of the transmitting node + * + * As the CANFD_ESI bit is internally generated by the transmitting CAN + * controller only the CANFD_BRS bit is relevant for real CAN controllers when + * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make + * sense for virtual CAN interfaces to test applications with echoed frames. 
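With the flag semantics described above, user space requests a CAN FD transmission simply by writing a struct canfd_frame to a raw CAN socket, optionally setting CANFD_BRS; CANFD_ESI is generated by the controller (or by a virtual interface echoing the frame). A minimal illustration, assuming the CAN_RAW_FD_FRAMES socket option from the same series and an interface named can0; error handling is omitted:

  /* canfd_send.c - send one CAN FD frame with bit rate switching requested */
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <net/if.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <linux/can.h>
  #include <linux/can/raw.h>

  int main(void)
  {
          int s, enable = 1;
          struct ifreq ifr;
          struct sockaddr_can addr = { .can_family = AF_CAN };
          struct canfd_frame cf = { .can_id = 0x123, .len = 16, .flags = CANFD_BRS };

          memset(cf.data, 0xaa, cf.len);

          s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
          /* allow struct canfd_frame (CANFD_MTU sized) writes on this socket */
          setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));

          strcpy(ifr.ifr_name, "can0");
          ioctl(s, SIOCGIFINDEX, &ifr);
          addr.can_ifindex = ifr.ifr_ifindex;
          bind(s, (struct sockaddr *)&addr, sizeof(addr));

          if (write(s, &cf, CANFD_MTU) != CANFD_MTU)
                  perror("write");

          close(s);
          return 0;
  }
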
*/ -#define CANFD_NOHDR 0x01 /* frame without high data rate */ -#define CANFD_NOEDL 0x02 /* frame without extended data length */ -#define CANFD_ESI 0x04 /* error state indicator */ +#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */ +#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */ /** * struct canfd_frame - CAN flexible data rate frame structure diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 133ddcf83397..ef658147e4e8 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - bool sync); + bool sync, bool *contended); extern int compact_pgdat(pg_data_t *pgdat, int order); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync) + bool sync, bool *contended) { return COMPACT_CONTINUE; } diff --git a/include/linux/efi.h b/include/linux/efi.h index 103adc6d7e3a..ec45ccd8708a 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -503,6 +503,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); extern int __init efi_uart_console_only (void); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); +extern unsigned long efi_get_time(void); +extern int efi_set_rtc_mmss(unsigned long nowtime); extern void efi_reserve_boot_services(void); extern struct efi_memory_map memmap; diff --git a/include/linux/fs.h b/include/linux/fs.h index d7eed5b98ae2..aa110476a95b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -414,6 +414,7 @@ struct inodes_stat_t { #include <linux/shrinker.h> #include <linux/migrate_mode.h> #include <linux/uidgid.h> +#include <linux/lockdep.h> #include <asm/byteorder.h> @@ -440,6 +441,8 @@ extern unsigned long get_max_files(void); extern int sysctl_nr_open; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; +extern int sysctl_protected_symlinks; +extern int sysctl_protected_hardlinks; struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, @@ -1162,7 +1165,6 @@ struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_release_private)(struct file_lock *); void (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock **, int); }; @@ -1446,6 +1448,8 @@ extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); +struct mm_struct; + /* * Umount options */ @@ -1459,10 +1463,34 @@ extern int send_sigurg(struct fown_struct *fown); extern struct list_head super_blocks; extern spinlock_t sb_lock; +/* Possible states of 'frozen' field */ +enum { + SB_UNFROZEN = 0, /* FS is unfrozen */ + SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ + SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ + SB_FREEZE_FS = 3, /* For internal FS 
use (e.g. to stop + * internal threads if needed) */ + SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ +}; + +#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) + +struct sb_writers { + /* Counters for counting writers at each level */ + struct percpu_counter counter[SB_FREEZE_LEVELS]; + wait_queue_head_t wait; /* queue for waiting for + writers / faults to finish */ + int frozen; /* Is sb frozen? */ + wait_queue_head_t wait_unfrozen; /* queue for waiting for + sb to be thawed */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map lock_map[SB_FREEZE_LEVELS]; +#endif +}; + struct super_block { struct list_head s_list; /* Keep this first */ dev_t s_dev; /* search index; _not_ kdev_t */ - unsigned char s_dirt; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; /* Max file size */ @@ -1506,8 +1534,7 @@ struct super_block { struct hlist_node s_instances; struct quota_info s_dquot; /* Diskquota specific options */ - int s_frozen; - wait_queue_head_t s_wait_unfrozen; + struct sb_writers s_writers; char s_id[32]; /* Informational name */ u8 s_uuid[16]; /* UUID */ @@ -1562,14 +1589,117 @@ extern struct timespec current_fs_time(struct super_block *sb); /* * Snapshotting support. */ -enum { - SB_UNFROZEN = 0, - SB_FREEZE_WRITE = 1, - SB_FREEZE_TRANS = 2, -}; -#define vfs_check_frozen(sb, level) \ - wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) +void __sb_end_write(struct super_block *sb, int level); +int __sb_start_write(struct super_block *sb, int level, bool wait); + +/** + * sb_end_write - drop write access to a superblock + * @sb: the super we wrote to + * + * Decrement number of writers to the filesystem. Wake up possible waiters + * wanting to freeze the filesystem. + */ +static inline void sb_end_write(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_WRITE); +} + +/** + * sb_end_pagefault - drop write access to a superblock from a page fault + * @sb: the super we wrote to + * + * Decrement number of processes handling write page fault to the filesystem. + * Wake up possible waiters wanting to freeze the filesystem. + */ +static inline void sb_end_pagefault(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_PAGEFAULT); +} + +/** + * sb_end_intwrite - drop write access to a superblock for internal fs purposes + * @sb: the super we wrote to + * + * Decrement fs-internal number of writers to the filesystem. Wake up possible + * waiters wanting to freeze the filesystem. + */ +static inline void sb_end_intwrite(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_FS); +} + +/** + * sb_start_write - get write access to a superblock + * @sb: the super we write to + * + * When a process wants to write data or metadata to a file system (i.e. dirty + * a page or an inode), it should embed the operation in a sb_start_write() - + * sb_end_write() pair to get exclusion against file system freezing. This + * function increments number of writers preventing freezing. If the file + * system is already frozen, the function waits until the file system is + * thawed. + * + * Since freeze protection behaves as a lock, users have to preserve + * ordering of freeze protection and other filesystem locks. Generally, + * freeze protection should be the outermost lock. In particular, we have: + * + * sb_start_write + * -> i_mutex (write path, truncate, directory ops, ...) 
+ * -> s_umount (freeze_super, thaw_super) + */ +static inline void sb_start_write(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_WRITE, true); +} + +static inline int sb_start_write_trylock(struct super_block *sb) +{ + return __sb_start_write(sb, SB_FREEZE_WRITE, false); +} + +/** + * sb_start_pagefault - get write access to a superblock from a page fault + * @sb: the super we write to + * + * When a process starts handling write page fault, it should embed the + * operation into sb_start_pagefault() - sb_end_pagefault() pair to get + * exclusion against file system freezing. This is needed since the page fault + * is going to dirty a page. This function increments number of running page + * faults preventing freezing. If the file system is already frozen, the + * function waits until the file system is thawed. + * + * Since page fault freeze protection behaves as a lock, users have to preserve + * ordering of freeze protection and other filesystem locks. It is advised to + * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault + * handling code implies lock dependency: + * + * mmap_sem + * -> sb_start_pagefault + */ +static inline void sb_start_pagefault(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); +} + +/* + * sb_start_intwrite - get write access to a superblock for internal fs purposes + * @sb: the super we write to + * + * This is the third level of protection against filesystem freezing. It is + * free for use by a filesystem. The only requirement is that it must rank + * below sb_start_pagefault. + * + * For example filesystem can call sb_start_intwrite() when starting a + * transaction which somewhat eases handling of freezing for internal sources + * of filesystem changes (internal fs threads, discarding preallocation on file + * close, etc.). 
+ */ +static inline void sb_start_intwrite(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_FS, true); +} + extern bool inode_owner_or_capable(const struct inode *inode); @@ -1730,7 +1860,6 @@ struct super_operations { int (*drop_inode) (struct inode *); void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -1893,6 +2022,7 @@ struct file_system_type { struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; @@ -2265,7 +2395,6 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); -extern void sync_supers(void); extern void emergency_sync(void); extern void emergency_remount(void); #ifdef CONFIG_BLOCK @@ -2335,9 +2464,6 @@ static inline void i_readcount_inc(struct inode *inode) } #endif extern int do_pipe_flags(int *, int); -extern struct file *create_read_pipe(struct file *f, int flags); -extern struct file *create_write_pipe(int flags); -extern void free_write_pipe(struct file *); extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern struct file * open_exec(const char *); diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index af961d6f7ab1..642928cf57b4 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -306,9 +306,10 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type, static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, struct pt_regs *regs, void *head) + u64 count, struct pt_regs *regs, void *head, + struct task_struct *task) { - perf_tp_event(addr, count, raw_data, size, regs, head, rctx); + perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task); } #endif diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 9303348965fb..d8c713e148e3 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -57,6 +57,9 @@ * * 7.19 * - add FUSE_FALLOCATE + * + * 7.20 + * - add FUSE_AUTO_INVAL_DATA */ #ifndef _LINUX_FUSE_H @@ -88,7 +91,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 19 +#define FUSE_KERNEL_MINOR_VERSION 20 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -163,10 +166,19 @@ struct fuse_file_lock { /** * INIT request/reply flags * + * FUSE_ASYNC_READ: asynchronous read requests * FUSE_POSIX_LOCKS: remote locking for POSIX file locks + * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported) + * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 
+ * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB * FUSE_DONT_MASK: don't apply umask to file mode on create operations + * FUSE_SPLICE_WRITE: kernel supports splice write on the device + * FUSE_SPLICE_MOVE: kernel supports splice move on the device + * FUSE_SPLICE_READ: kernel supports splice read on the device * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks + * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories + * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -175,7 +187,12 @@ struct fuse_file_lock { #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) #define FUSE_DONT_MASK (1 << 6) +#define FUSE_SPLICE_WRITE (1 << 7) +#define FUSE_SPLICE_MOVE (1 << 8) +#define FUSE_SPLICE_READ (1 << 9) #define FUSE_FLOCK_LOCKS (1 << 10) +#define FUSE_HAS_IOCTL_DIR (1 << 11) +#define FUSE_AUTO_INVAL_DATA (1 << 12) /** * CUSE INIT request/reply flags diff --git a/include/linux/genhd.h b/include/linux/genhd.h index ae0aaa9d42fa..4f440b3e89fe 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -97,7 +97,13 @@ struct partition_meta_info { struct hd_struct { sector_t start_sect; + /* + * nr_sects is protected by a sequence counter. One might extend a + * partition while IO is happening to it and update of nr_sects + * can be non-atomic on 32bit machines with 64bit sector_t. + */ sector_t nr_sects; + seqcount_t nr_sects_seq; sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; @@ -647,6 +653,57 @@ static inline void hd_struct_put(struct hd_struct *part) __delete_partition(part); } +/* + * Any access of part->nr_sects which is not protected by partition + * bd_mutex or gendisk bdev bd_mutex, should be done using this + * accessor function. + * + * Code written along the lines of i_size_read() and i_size_write(). + * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption + * on. + */ +static inline sector_t part_nr_sects_read(struct hd_struct *part) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + sector_t nr_sects; + unsigned seq; + do { + seq = read_seqcount_begin(&part->nr_sects_seq); + nr_sects = part->nr_sects; + } while (read_seqcount_retry(&part->nr_sects_seq, seq)); + return nr_sects; +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + sector_t nr_sects; + + preempt_disable(); + nr_sects = part->nr_sects; + preempt_enable(); + return nr_sects; +#else + return part->nr_sects; +#endif +} + +/* + * Should be called with mutex lock held (typically bd_mutex) of partition + * to provide mutual exclusion among writers, otherwise the seqcount might be + * left in a wrong state, leaving the readers spinning infinitely. A usage + * sketch follows below.
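A short sketch of the pairing mentioned above (illustrative only; the helpers around the accessors are hypothetical, the point is where the lock is held):

/* Writer: bd_mutex serializes concurrent size updates. */
static void example_set_part_size(struct block_device *bdev,
				  struct hd_struct *part, sector_t new_sects)
{
	mutex_lock(&bdev->bd_mutex);
	part_nr_sects_write(part, new_sects);
	mutex_unlock(&bdev->bd_mutex);
}

/* Reader: lockless; the seqcount retry loop hides torn 64-bit updates. */
static sector_t example_get_part_size(struct hd_struct *part)
{
	return part_nr_sects_read(part);
}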
+ */ +static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP) + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); +#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; + preempt_enable(); +#else + part->nr_sects = size; +#endif +} + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index bb7f30971858..305f23cd7cff 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -22,7 +22,7 @@ * * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) * - bit 26 is the NMI_MASK - * - bit 28 is the PREEMPT_ACTIVE flag + * - bit 27 is the PREEMPT_ACTIVE flag * * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 6960fc1841a7..aa2e167e1ef4 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -96,21 +96,6 @@ static inline void team_netpoll_send_skb(struct team_port *port, } #endif -static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, - struct sk_buff *skb) -{ - BUILD_BUG_ON(sizeof(skb->queue_mapping) != - sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); - skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); - - skb->dev = port->dev; - if (unlikely(netpoll_tx_running(port->dev))) { - team_netpoll_send_skb(port, skb); - return 0; - } - return dev_queue_xmit(skb); -} - struct team_mode_ops { int (*init)(struct team *team); void (*exit)(struct team *team); @@ -200,6 +185,21 @@ struct team { long mode_priv[TEAM_MODE_PRIV_LONGS]; }; +static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, + struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(skb->queue_mapping) != + sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); + skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); + + skb->dev = port->dev; + if (unlikely(netpoll_tx_running(team->dev))) { + team_netpoll_send_skb(port, skb); + return 0; + } + return dev_queue_xmit(skb); +} + static inline struct hlist_head *team_port_index_hash(struct team *team, int port_index) { diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h index b76b4a87065e..be91f344d5fc 100644 --- a/include/linux/iio/frequency/adf4350.h +++ b/include/linux/iio/frequency/adf4350.h @@ -87,6 +87,8 @@ #define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */ #define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */ #define ADF4350_MAX_MODULUS 4095 +#define ADF4350_MAX_R_CNT 1023 + /** * struct adf4350_platform_data - platform specific information diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h index f875b316249d..16625d799b6f 100644 --- a/include/linux/input/eeti_ts.h +++ b/include/linux/input/eeti_ts.h @@ -2,6 +2,7 @@ #define LINUX_INPUT_EETI_TS_H struct eeti_ts_platform_data { + int irq_gpio; unsigned int irq_active_high; }; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 54d6d690073c..7e83370e6fd2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -20,6 +20,7 @@ #define __LINUX_IOMMU_H #include <linux/errno.h> +#include <linux/types.h> #define IOMMU_READ (1) #define IOMMU_WRITE (2) @@ -30,6 +31,7 @@ struct iommu_group; struct bus_type; struct device; struct iommu_domain; +struct notifier_block; /* iommu 
fault flags */ #define IOMMU_FAULT_READ 0x0 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 379e433e15e0..879db26ec401 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -369,6 +369,7 @@ struct ipv6_pinfo { __u8 rcv_tclass; __u32 dst_cookie; + __u32 rx_dst_cookie; struct ipv6_mc_socklist __rcu *ipv6_mc_list; struct ipv6_ac_socklist *ipv6_ac_list; diff --git a/include/linux/irq.h b/include/linux/irq.h index 553fb66da130..216b0ba109d7 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -349,6 +349,7 @@ enum { IRQCHIP_MASK_ON_SUSPEND = (1 << 2), IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), IRQCHIP_SKIP_SET_WAKE = (1 << 4), + IRQCHIP_ONESHOT_SAFE = (1 << 5), }; /* This include will go away once we isolated irq_desc usage to core code */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f334c7fab967..3efc43f3f162 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1125,6 +1125,7 @@ extern int jbd2_journal_destroy (journal_t *); extern int jbd2_journal_recover (journal_t *journal); extern int jbd2_journal_wipe (journal_t *, int); extern int jbd2_journal_skip_recovery (journal_t *); +extern void jbd2_journal_update_sb_errno(journal_t *); extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, unsigned long, int); extern void __jbd2_journal_abort_hard (journal_t *); diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 265e2c3cbd1c..82680541576d 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -39,9 +39,6 @@ # error Invalid value of HZ. #endif -/* LATCH is used in the interval timer and ftape setup. */ -#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ - /* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can * improve accuracy by shifting LSH bits, hence calculating: * (NOM << LSH) / DEN @@ -54,18 +51,30 @@ #define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) -/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */ -#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) +#ifdef CLOCK_TICK_RATE +/* LATCH is used in the interval timer and ftape setup. */ +# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ + +/* + * HZ is the requested value. However, the CLOCK_TICK_RATE may not allow + * for exactly HZ. So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy; worked example below) + */ +# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8)) +#else +# define SHIFTED_HZ (HZ << 8) +#endif -/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */ -#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8)) +/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ +#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8)) /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) -/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */ -/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */ -#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8)) +/* + * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and + * a value TUSEC for TICK_USEC (can be set by adjtimex) + */ +#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8)) /* some arch's have a small-data section that can be accessed register-relative * but that can only take up to, say, 4-byte variables.
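A worked example of the jiffies macros above, using purely illustrative numbers (a PIT-style CLOCK_TICK_RATE of 1193182 Hz and HZ = 1000):

/*
 * LATCH      = (1193182 + 1000/2) / 1000 = 1193
 * SHIFTED_HZ = SH_DIV(1193182, 1193, 8)
 *            = (1000 << 8) + ((182 << 8) + 596) / 1193
 *            = 256000 + 39 = 256039        (~1000.15 Hz, scaled by 256)
 * TICK_NSEC  = SH_DIV(1000000000, 256039, 8)
 *            = (3905 << 8) + ((167705 << 8) + 128019) / 256039
 *            = 999680 + 168 = 999848 ns per tick
 *
 * Without a CLOCK_TICK_RATE definition, SHIFTED_HZ = 1000 << 8 = 256000
 * and TICK_NSEC evaluates to exactly 1000000 ns.
 */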
jiffies being part of diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 064725854db8..42d9e863a313 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -75,8 +75,6 @@ extern const char *kdb_diemsg; #define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */ #define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */ #define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */ -#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when - * kdb is off */ #define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, * kdb is disabled */ #define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do diff --git a/include/linux/kref.h b/include/linux/kref.h index 9c07dcebded7..65af6887872f 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -18,6 +18,7 @@ #include <linux/bug.h> #include <linux/atomic.h> #include <linux/kernel.h> +#include <linux/mutex.h> struct kref { atomic_t refcount; @@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) { return kref_sub(kref, 1, release); } + +static inline int kref_put_mutex(struct kref *kref, + void (*release)(struct kref *kref), + struct mutex *lock) +{ + WARN_ON(release == NULL); + if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { + mutex_lock(lock); + if (unlikely(!atomic_dec_and_test(&kref->refcount))) { + mutex_unlock(lock); + return 0; + } + release(kref); + return 1; + } + return 0; +} #endif /* _KREF_H_ */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 7c08052e3321..39ed62ab5b8a 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -26,7 +26,8 @@ typedef struct mempool_s { extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, int nid); + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int nid); extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); extern void mempool_destroy(mempool_t *pool); diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index 40c372165f3e..32a1b5cfeba1 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -16,6 +16,7 @@ struct pcap_subdev { struct pcap_platform_data { unsigned int irq_base; unsigned int config; + int gpio; void (*init) (void *); /* board specific init */ int num_subdevs; struct pcap_subdev *subdevs; diff --git a/include/linux/mm.h b/include/linux/mm.h index bd079a1b0fdc..311be906b57d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1441,6 +1441,7 @@ extern void truncate_inode_pages_range(struct address_space *, /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); +extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); diff --git a/include/linux/namei.h b/include/linux/namei.h index d2ef8b34b967..4bf19d8174ed 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -67,6 +67,7 @@ extern int kern_path(const char *, unsigned, struct path *); extern struct dentry *kern_path_create(int, const char *, struct path *, int); extern struct dentry *user_path_create(int, const char __user *, struct path *, int); +extern void done_path_create(struct path *, struct dentry *); extern 
struct dentry *kern_path_locked(const char *, struct path *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eb06e58bed0b..59dc05f38247 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -953,7 +953,8 @@ struct net_device_ops { #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); int (*ndo_netpoll_setup)(struct net_device *dev, - struct netpoll_info *info); + struct netpoll_info *info, + gfp_t gfp); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -1300,6 +1301,8 @@ struct net_device { /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; +#define GSO_MAX_SEGS 65535 + u16 gso_max_segs; #ifdef CONFIG_DCB /* Data Center Bridging netlink ops */ @@ -1519,6 +1522,8 @@ struct packet_type { struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); + bool (*id_match)(struct packet_type *ptype, + struct sock *sk); void *af_packet_priv; struct list_head list; }; diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index 0dfc8b7210a3..89f2a627f3f0 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -164,7 +164,7 @@ extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr unsigned int dataoff, unsigned int datalen, const char *name, unsigned int *matchoff, unsigned int *matchlen, - union nf_inet_addr *addr); + union nf_inet_addr *addr, bool delim); extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, unsigned int off, unsigned int datalen, const char *name, diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 28f5389c924b..66d5379c305e 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -23,6 +23,7 @@ struct netpoll { u8 remote_mac[ETH_ALEN]; struct list_head rx; /* rx_np list element */ + struct rcu_head rcu; }; struct netpoll_info { @@ -38,28 +39,40 @@ struct netpoll_info { struct delayed_work tx_work; struct netpoll *netpoll; + struct rcu_head rcu; }; void netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); -int __netpoll_setup(struct netpoll *np, struct net_device *ndev); +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp); int netpoll_setup(struct netpoll *np); int netpoll_trap(void); void netpoll_set_trap(int trap); void __netpoll_cleanup(struct netpoll *np); +void __netpoll_free_rcu(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); -int __netpoll_rx(struct sk_buff *skb); +int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo); void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, struct net_device *dev); static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { + unsigned long flags; + local_irq_save(flags); netpoll_send_skb_on_dev(np, skb, np->dev); + local_irq_restore(flags); } #ifdef CONFIG_NETPOLL +static inline bool netpoll_rx_on(struct sk_buff *skb) +{ + struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); + + return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); +} + static inline bool 
netpoll_rx(struct sk_buff *skb) { struct netpoll_info *npinfo; @@ -67,14 +80,14 @@ static inline bool netpoll_rx(struct sk_buff *skb) bool ret = false; local_irq_save(flags); - npinfo = rcu_dereference_bh(skb->dev->npinfo); - if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) + if (!netpoll_rx_on(skb)) goto out; + npinfo = rcu_dereference_bh(skb->dev->npinfo); spin_lock(&npinfo->rx_lock); /* check rx_flags again with the lock held */ - if (npinfo->rx_flags && __netpoll_rx(skb)) + if (npinfo->rx_flags && __netpoll_rx(skb, npinfo)) ret = true; spin_unlock(&npinfo->rx_lock); @@ -83,13 +96,6 @@ out: return ret; } -static inline int netpoll_rx_on(struct sk_buff *skb) -{ - struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); - - return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); -} - static inline int netpoll_receive_skb(struct sk_buff *skb) { if (!list_empty(&skb->dev->napi_list)) @@ -119,7 +125,7 @@ static inline void netpoll_poll_unlock(void *have) } } -static inline int netpoll_tx_running(struct net_device *dev) +static inline bool netpoll_tx_running(struct net_device *dev) { return irqs_disabled(); } @@ -127,11 +133,11 @@ static inline int netpoll_tx_running(struct net_device *dev) #else static inline bool netpoll_rx(struct sk_buff *skb) { - return 0; + return false; } -static inline int netpoll_rx_on(struct sk_buff *skb) +static inline bool netpoll_rx_on(struct sk_buff *skb) { - return 0; + return false; } static inline int netpoll_receive_skb(struct sk_buff *skb) { @@ -147,9 +153,9 @@ static inline void netpoll_poll_unlock(void *have) static inline void netpoll_netdev_init(struct net_device *dev) { } -static inline int netpoll_tx_running(struct net_device *dev) +static inline bool netpoll_tx_running(struct net_device *dev) { - return 0; + return false; } #endif diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index ce4743a26015..fa63048fecff 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -143,6 +143,7 @@ typedef struct svc_fh { int fh_maxsize; /* max size for fh_handle */ unsigned char fh_locked; /* inode locked by us */ + unsigned char fh_want_write; /* remount protection taken */ #ifdef CONFIG_NFSD_V3 unsigned char fh_post_saved; /* post-op attrs saved */ diff --git a/include/linux/of.h b/include/linux/of.h index 5919ee33f2b7..1b1163225f3b 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -190,10 +190,17 @@ extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); +extern struct device_node *of_get_next_available_child( + const struct device_node *node, struct device_node *prev); + #define for_each_child_of_node(parent, child) \ for (child = of_get_next_child(parent, NULL); child != NULL; \ child = of_get_next_child(parent, child)) +#define for_each_available_child_of_node(parent, child) \ + for (child = of_get_next_available_child(parent, NULL); child != NULL; \ + child = of_get_next_available_child(parent, child)) + static inline int of_get_child_count(const struct device_node *np) { struct device_node *child; diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h new file mode 100644 index 000000000000..5bb6e760aa61 --- /dev/null +++ b/include/linux/olpc-ec.h @@ -0,0 +1,41 @@ +#ifndef _LINUX_OLPC_EC_H +#define _LINUX_OLPC_EC_H + +/* XO-1 EC commands */ +#define EC_FIRMWARE_REV 0x08 
+#define EC_WRITE_SCI_MASK 0x1b +#define EC_WAKE_UP_WLAN 0x24 +#define EC_WLAN_LEAVE_RESET 0x25 +#define EC_READ_EB_MODE 0x2a +#define EC_SET_SCI_INHIBIT 0x32 +#define EC_SET_SCI_INHIBIT_RELEASE 0x34 +#define EC_WLAN_ENTER_RESET 0x35 +#define EC_WRITE_EXT_SCI_MASK 0x38 +#define EC_SCI_QUERY 0x84 +#define EC_EXT_SCI_QUERY 0x85 + +struct platform_device; + +struct olpc_ec_driver { + int (*probe)(struct platform_device *); + int (*suspend)(struct platform_device *); + int (*resume)(struct platform_device *); + + int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *); +}; + +#ifdef CONFIG_OLPC + +extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg); + +extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, + size_t outlen); + +#else + +static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, + size_t outlen) { return -ENODEV; } + +#endif /* CONFIG_OLPC */ + +#endif /* _LINUX_OLPC_EC_H */ diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h new file mode 100644 index 000000000000..eb475a8ea25b --- /dev/null +++ b/include/linux/omap-dma.h @@ -0,0 +1,22 @@ +/* + * OMAP DMA Engine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_OMAP_DMA_H +#define __LINUX_OMAP_DMA_H + +struct dma_chan; + +#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE) +bool omap_dma_filter_fn(struct dma_chan *, void *); +#else +static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) +{ + return false; +} +#endif + +#endif diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 76c5c8b724a7..7602ccb3f40e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1272,7 +1272,8 @@ static inline bool perf_paranoid_kernel(void) extern void perf_event_init(void); extern void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, struct pt_regs *regs, - struct hlist_head *head, int rctx); + struct hlist_head *head, int rctx, + struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 6dd96fb45482..e9b7f4350844 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -20,6 +20,7 @@ /* This struct is private to the core and should be regarded as a cookie */ struct pinctrl; struct pinctrl_state; +struct device; #ifdef CONFIG_PINCTRL diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index e11d1c0fc60f..ad1a427b5267 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -160,4 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); long pipe_fcntl(struct file *, unsigned int, unsigned long arg); struct pipe_inode_info *get_pipe_info(struct file *file); +int create_pipe_files(struct file **, int); + #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index c147e7024f11..b8c86648a2f9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -334,14 +334,6 @@ static inline void lockup_detector_init(void) } #endif -#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND) -void lockup_detector_bootcpu_resume(void); -#else -static inline void lockup_detector_bootcpu_resume(void) -{ -} -#endif - #ifdef CONFIG_DETECT_HUNG_TASK extern unsigned int sysctl_hung_task_panic; 
extern unsigned long sysctl_hung_task_check_count; diff --git a/include/linux/security.h b/include/linux/security.h index 4e5a73cdbbef..3dea6a9d568f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1242,8 +1242,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself * to the @parent process for tracing. - * The parent process will still have to undergo the ptrace_access_check - * checks before it is allowed to trace this one. * @parent contains the task_struct structure for debugger process. * Return 0 if permission is granted. * @capget: diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index 93f9821554b6..a3728bf66f0e 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -50,6 +50,7 @@ struct shdma_desc { struct list_head node; struct dma_async_tx_descriptor async_tx; enum dma_transfer_direction direction; + size_t partial; dma_cookie_t cookie; int chunks; int mark; @@ -98,6 +99,7 @@ struct shdma_ops { void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); struct shdma_desc *(*embedded_desc)(void *, int); bool (*chan_irq)(struct shdma_chan *, int); + size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *); }; struct shdma_dev { diff --git a/include/linux/string.h b/include/linux/string.h index ffe0442e18d2..b9178812d9df 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -144,8 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; } -#endif extern size_t memweight(const void *ptr, size_t bytes); +#endif /* __KERNEL__ */ #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/timex.h b/include/linux/timex.h index 99bc88b1fc02..7c5ceb20e03a 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -232,7 +232,7 @@ struct timex { * estimated error = NTP dispersion. */ extern unsigned long tick_usec; /* USER_HZ period (usec) */ -extern unsigned long tick_nsec; /* ACTHZ period (nsec) */ +extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ extern void ntp_init(void); extern void ntp_clear(void); diff --git a/include/linux/topology.h b/include/linux/topology.h index e91cd43394df..fec12d667211 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -164,6 +164,7 @@ int arch_update_cpu_topology(void); | 0*SD_SHARE_CPUPOWER \ | 0*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ + | 1*SD_PREFER_SIBLING \ , \ .last_balance = jiffies, \ .balance_interval = 1, \ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index c66fe3332d83..50c3e8fa06a8 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -104,7 +104,6 @@ static inline void wait_on_inode(struct inode *inode) wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); } - /* * mm/page-writeback.c */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 493fa0c79005..3d254e10ff30 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -96,6 +96,7 @@ enum ieee80211_band { * is not permitted. * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel * is not permitted. + * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel. 
*/ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -104,6 +105,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_RADAR = 1<<3, IEEE80211_CHAN_NO_HT40PLUS = 1<<4, IEEE80211_CHAN_NO_HT40MINUS = 1<<5, + IEEE80211_CHAN_NO_OFDM = 1<<6, }; #define IEEE80211_CHAN_NO_HT40 \ diff --git a/include/net/codel.h b/include/net/codel.h index 550debfc2403..389cf621161d 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, } } } else if (drop) { + u32 delta; + if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { @@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, * assume that the drop rate that controlled the queue on the * last cycle is a good starting point to control it now. */ - if (codel_time_before(now - vars->drop_next, + delta = vars->count - vars->lastcount; + if (delta > 1 && + codel_time_before(now - vars->drop_next, 16 * params->interval)) { - vars->count = (vars->count - vars->lastcount) | 1; + vars->count = delta; /* we dont care if rec_inv_sqrt approximation * is not very precise : * Next Newton steps will correct it quadratically. diff --git a/include/net/dst.h b/include/net/dst.h index baf597890064..621e3513ef5e 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -110,7 +110,7 @@ struct dst_entry { }; extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); -extern const u32 dst_default_metrics[RTAX_MAX]; +extern const u32 dst_default_metrics[]; #define DST_METRICS_READ_ONLY 0x1UL #define __DST_METRICS_PTR(Y) \ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5ee66f517b4f..ba1d3615acbb 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -39,6 +39,7 @@ struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl); void (*send_check)(struct sock *sk, struct sk_buff *skb); int (*rebuild_header)(struct sock *sk); + void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, struct request_sock *req, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 83b567fe1941..613cfa401672 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -249,13 +249,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) return flags; } -static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; -} - #endif /* _INET_SOCK_H */ diff --git a/include/net/ip.h b/include/net/ip.h index bd5e444a19ce..5a5d84d3d2c6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -120,7 +120,7 @@ extern struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, struct inet_cork *cork); -extern int ip_send_skb(struct sk_buff *skb); +extern int ip_send_skb(struct net *net, struct sk_buff *skb); extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4); extern void ip_flush_pending_frames(struct sock *sk); extern struct sk_buff *ip_make_skb(struct sock *sk, diff --git a/include/net/llc.h b/include/net/llc.h index 226c846cab08..f2d0fc570527 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -133,7 +133,7 @@ extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff 
*skb, extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_station_init(void); +extern void llc_station_init(void); extern void llc_station_exit(void); #ifdef CONFIG_PROC_FS diff --git a/include/net/scm.h b/include/net/scm.h index 079d7887dac1..7dc0854f0b38 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -70,9 +70,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm) } static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm) + struct scm_cookie *scm, bool forcecreds) { memset(scm, 0, sizeof(*scm)); + if (forcecreds) + scm_set_cred(scm, task_tgid(current), current_cred()); unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) return 0; diff --git a/include/net/sock.h b/include/net/sock.h index b3730239bf18..72132aef53fc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -218,6 +218,7 @@ struct cg_proto; * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) * @sk_gso_max_size: Maximum GSO segment size to build + * @sk_gso_max_segs: Maximum number of GSO segments * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct @@ -338,6 +339,7 @@ struct sock { netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; + u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; diff --git a/include/net/tcp.h b/include/net/tcp.h index e19124b84cd2..1f000ffe7075 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -464,6 +464,7 @@ extern int tcp_disconnect(struct sock *sk, int flags); void tcp_connect_init(struct sock *sk); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d9509eb29b80..976a81abe1a2 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -213,6 +213,9 @@ struct xfrm_state { struct xfrm_lifetime_cur curlft; struct tasklet_hrtimer mtimer; + /* used to fix curlft->add_time when changing date */ + long saved_tmo; + /* Last used time */ unsigned long lastused; @@ -238,6 +241,7 @@ static inline struct net *xs_net(struct xfrm_state *x) /* xflags - make enum if more show up */ #define XFRM_TIME_DEFER 1 +#define XFRM_SOFT_EXPIRE 2 enum { XFRM_STATE_VOID, @@ -288,6 +292,8 @@ struct xfrm_policy_afinfo { struct flowi *fl, int reverse); int (*get_tos)(const struct flowi *fl); + void (*init_dst)(struct net *net, + struct xfrm_dst *dst); int (*init_path)(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len); diff --git a/include/sound/es1688.h b/include/sound/es1688.h index 3ec7ecbe2502..f752dd33dfaf 100644 --- a/include/sound/es1688.h +++ b/include/sound/es1688.h @@ -29,6 +29,7 @@ #define ES1688_HW_AUTO 0x0000 #define ES1688_HW_688 0x0001 #define ES1688_HW_1688 0x0002 +#define ES1688_HW_UNDEF 0x0003 struct snd_es1688 { unsigned long port; /* port of ESS chip */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index c75c0d1a85e2..cdca2ab1e711 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1075,7 +1075,8 @@ static inline void 
snd_pcm_limit_isa_dma_size(int dma, size_t *max) const char *snd_pcm_format_name(snd_pcm_format_t format); /** - * Get a string naming the direction of a stream + * snd_pcm_stream_str - Get a string naming the direction of a stream + * @substream: the pcm substream instance */ static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream) { diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index ea7a2035456d..5a8671e8a67f 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -73,6 +73,9 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, __entry->prio = p->prio; __entry->success = success; __entry->target_cpu = task_cpu(p); + ) + TP_perf_assign( + __perf_task(p); ), TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", @@ -325,6 +328,7 @@ DECLARE_EVENT_CLASS(sched_stat_template, ) TP_perf_assign( __perf_count(delay); + __perf_task(tsk); ), TP_printk("comm=%s pid=%d delay=%Lu [ns]", diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index c6bc2faaf261..a763888a36f9 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -712,6 +712,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call #undef __perf_count #define __perf_count(c) __count = (c) +#undef __perf_task +#define __perf_task(t) __task = (t) + #undef TP_perf_assign #define TP_perf_assign(args...) args @@ -725,6 +728,7 @@ perf_trace_##call(void *__data, proto) \ struct ftrace_raw_##call *entry; \ struct pt_regs __regs; \ u64 __addr = 0, __count = 1; \ + struct task_struct *__task = NULL; \ struct hlist_head *head; \ int __entry_size; \ int __data_size; \ @@ -752,7 +756,7 @@ perf_trace_##call(void *__data, proto) \ \ head = this_cpu_ptr(event_call->perf_events); \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, &__regs, head); \ + __count, &__regs, head, __task); \ } /* diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index 89d43b3d4cb9..5a0e4f9efb53 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h @@ -82,6 +82,9 @@ struct lcd_ctrl_config { /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */ unsigned char raster_order; + + /* DMA FIFO threshold */ + int fifo_th; }; struct lcd_sync_arg { diff --git a/include/video/omapdss.h b/include/video/omapdss.h index c8e59b4a3364..a6267a2d292b 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -48,6 +48,10 @@ #define DISPC_IRQ_FRAMEDONEWB (1 << 23) #define DISPC_IRQ_FRAMEDONETV (1 << 24) #define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) +#define DISPC_IRQ_FRAMEDONE3 (1 << 26) +#define DISPC_IRQ_VSYNC3 (1 << 27) +#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 28) +#define DISPC_IRQ_SYNC_LOST3 (1 << 29) struct omap_dss_device; struct omap_overlay_manager; @@ -75,6 +79,7 @@ enum omap_channel { OMAP_DSS_CHANNEL_LCD = 0, OMAP_DSS_CHANNEL_DIGIT = 1, OMAP_DSS_CHANNEL_LCD2 = 2, + OMAP_DSS_CHANNEL_LCD3 = 3, }; enum omap_color_mode { @@ -99,11 +104,6 @@ enum omap_color_mode { OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ }; -enum omap_lcd_display_type { - OMAP_DSS_LCD_DISPLAY_STN, - OMAP_DSS_LCD_DISPLAY_TFT, -}; - enum omap_dss_load_mode { OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, OMAP_DSS_LOAD_CLUT_ONLY = 1, @@ -121,15 +121,15 @@ enum omap_rfbi_te_mode { OMAP_DSS_RFBI_TE_MODE_2 = 2, }; -enum omap_panel_config { - OMAP_DSS_LCD_IVS = 1<<0, - OMAP_DSS_LCD_IHS = 1<<1, - OMAP_DSS_LCD_IPC = 1<<2, - OMAP_DSS_LCD_IEO = 1<<3, - OMAP_DSS_LCD_RF = 1<<4, - OMAP_DSS_LCD_ONOFF = 1<<5, +enum omap_dss_signal_level { + 
OMAPDSS_SIG_ACTIVE_HIGH = 0, + OMAPDSS_SIG_ACTIVE_LOW = 1, +}; - OMAP_DSS_LCD_TFT = 1<<20, +enum omap_dss_signal_edge { + OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + OMAPDSS_DRIVE_SIG_RISING_EDGE, + OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; enum omap_dss_venc_type { @@ -167,13 +167,6 @@ enum omap_dss_audio_state { OMAP_DSS_AUDIO_PLAYING, }; -/* XXX perhaps this should be removed */ -enum omap_dss_overlay_managers { - OMAP_DSS_OVL_MGR_LCD, - OMAP_DSS_OVL_MGR_TV, - OMAP_DSS_OVL_MGR_LCD2, -}; - enum omap_dss_rotation_type { OMAP_DSS_ROT_DMA = 1 << 0, OMAP_DSS_ROT_VRFB = 1 << 1, @@ -268,9 +261,6 @@ struct omap_dss_dsi_videomode_data { int hfp_blanking_mode; /* Video port sync events */ - int vp_de_pol; - int vp_hsync_pol; - int vp_vsync_pol; bool vp_vsync_end; bool vp_hsync_end; @@ -346,6 +336,19 @@ struct omap_video_timings { u16 vfp; /* Vertical front porch */ /* Unit: line clocks */ u16 vbp; /* Vertical back porch */ + + /* Vsync logic level */ + enum omap_dss_signal_level vsync_level; + /* Hsync logic level */ + enum omap_dss_signal_level hsync_level; + /* Interlaced or Progressive timings */ + bool interlace; + /* Pixel clock edge to drive LCD data */ + enum omap_dss_signal_edge data_pclk_edge; + /* Data enable logic level */ + enum omap_dss_signal_level de_level; + /* Pixel clock edges to drive HSYNC and VSYNC signals */ + enum omap_dss_signal_edge sync_pclk_edge; }; #ifdef CONFIG_OMAP2_DSS_VENC @@ -559,8 +562,6 @@ struct omap_dss_device { /* Unit: line clocks */ int acb; /* ac-bias pin frequency */ - enum omap_panel_config config; - enum omap_dss_dsi_pixel_format dsi_pix_fmt; enum omap_dss_dsi_mode dsi_mode; struct omap_dss_dsi_videomode_data dsi_vm_data; diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h index 7571b27a0ba1..ff43ffc1aab2 100644 --- a/include/video/sh_mobile_lcdc.h +++ b/include/video/sh_mobile_lcdc.h @@ -166,6 +166,12 @@ struct sh_mobile_lcdc_bl_info { int (*get_brightness)(void); }; +struct sh_mobile_lcdc_overlay_cfg { + int fourcc; + unsigned int max_xres; + unsigned int max_yres; +}; + struct sh_mobile_lcdc_chan_cfg { int chan; int fourcc; @@ -186,6 +192,7 @@ struct sh_mobile_lcdc_chan_cfg { struct sh_mobile_lcdc_info { int clock_source; struct sh_mobile_lcdc_chan_cfg ch[2]; + struct sh_mobile_lcdc_overlay_cfg overlays[4]; struct sh_mobile_meram_info *meram_dev; }; diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h index 29b2fd3b147e..062e6e7f955c 100644 --- a/include/video/sh_mobile_meram.h +++ b/include/video/sh_mobile_meram.h @@ -15,7 +15,6 @@ enum { struct sh_mobile_meram_priv; -struct sh_mobile_meram_ops; /* * struct sh_mobile_meram_info - MERAM platform data @@ -24,7 +23,6 @@ struct sh_mobile_meram_ops; struct sh_mobile_meram_info { int addr_mode; u32 reserved_icbs; - struct sh_mobile_meram_ops *ops; struct sh_mobile_meram_priv *priv; struct platform_device *pdev; }; @@ -38,26 +36,59 @@ struct sh_mobile_meram_cfg { struct sh_mobile_meram_icb_cfg icb[2]; }; -struct module; -struct sh_mobile_meram_ops { - struct module *module; - /* register usage of meram */ - void *(*meram_register)(struct sh_mobile_meram_info *meram_dev, - const struct sh_mobile_meram_cfg *cfg, - unsigned int xres, unsigned int yres, - unsigned int pixelformat, - unsigned int *pitch); - - /* unregister usage of meram */ - void (*meram_unregister)(struct sh_mobile_meram_info *meram_dev, - void *data); - - /* update meram settings */ - void (*meram_update)(struct sh_mobile_meram_info *meram_dev, void *data, +#if defined(CONFIG_FB_SH_MOBILE_MERAM) || \ + 
defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE) +unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, + size_t size); +void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, + unsigned long mem, size_t size); +void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, + const struct sh_mobile_meram_cfg *cfg, + unsigned int xres, unsigned int yres, + unsigned int pixelformat, + unsigned int *pitch); +void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data); +void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, + unsigned long base_addr_y, + unsigned long base_addr_c, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c); +#else +static inline unsigned long +sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, size_t size) +{ + return 0; +} + +static inline void +sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev, + unsigned long mem, size_t size) +{ +} + +static inline void * +sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev, + const struct sh_mobile_meram_cfg *cfg, + unsigned int xres, unsigned int yres, + unsigned int pixelformat, + unsigned int *pitch) +{ + return ERR_PTR(-ENODEV); +} + +static inline void +sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data) +{ +} + +static inline void +sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data, unsigned long base_addr_y, unsigned long base_addr_c, unsigned long *icb_addr_y, - unsigned long *icb_addr_c); -}; + unsigned long *icb_addr_c) +{ +} +#endif #endif /* __VIDEO_SH_MOBILE_MERAM_H__ */ diff --git a/init/main.c b/init/main.c index e60679de61c3..b28673087ac0 100644 --- a/init/main.c +++ b/init/main.c @@ -461,10 +461,6 @@ static void __init mm_init(void) percpu_init_late(); pgtable_cache_init(); vmalloc_init(); -#ifdef CONFIG_X86 - if (efi_enabled) - efi_enter_virtual_mode(); -#endif } asmlinkage void __init start_kernel(void) @@ -606,6 +602,10 @@ asmlinkage void __init start_kernel(void) calibrate_delay(); pidmap_init(); anon_vma_init(); +#ifdef CONFIG_X86 + if (efi_enabled) + efi_enter_virtual_mode(); +#endif thread_info_cache_init(); cred_init(); fork_init(totalram_pages); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index f8e54f5b9080..9a08acc9e649 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, struct mq_attr *attr) { const struct cred *cred = current_cred(); - struct file *result; int ret; if (attr) { @@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, } mode &= ~current_umask(); - ret = mnt_want_write(path->mnt); - if (ret) - return ERR_PTR(ret); ret = vfs_create(dir, path->dentry, mode, true); path->dentry->d_fsdata = NULL; - if (!ret) - result = dentry_open(path, oflag, cred); - else - result = ERR_PTR(ret); - /* - * dentry_open() took a persistent mnt_want_write(), - * so we can now drop this one. 
- */ - mnt_drop_write(path->mnt); - return result; + if (ret) + return ERR_PTR(ret); + return dentry_open(path, oflag, cred); } /* Opens existing queue */ @@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr attr; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - struct dentry *root = ipc_ns->mq_mnt->mnt_root; + struct vfsmount *mnt = ipc_ns->mq_mnt; + struct dentry *root = mnt->mnt_root; + int ro; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; @@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, if (fd < 0) goto out_putname; + ro = mnt_want_write(mnt); /* we'll drop it in any case */ error = 0; mutex_lock(&root->d_inode->i_mutex); path.dentry = lookup_one_len(name, root, strlen(name)); @@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, error = PTR_ERR(path.dentry); goto out_putfd; } - path.mnt = mntget(ipc_ns->mq_mnt); + path.mnt = mntget(mnt); if (oflag & O_CREAT) { if (path.dentry->d_inode) { /* entry already exists */ @@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, } filp = do_open(&path, oflag); } else { + if (ro) { + error = ro; + goto out; + } filp = do_create(ipc_ns, root->d_inode, &path, oflag, mode, u_attr ? &attr : NULL); @@ -845,6 +841,7 @@ out_putfd: fd = error; } mutex_unlock(&root->d_inode->i_mutex); + mnt_drop_write(mnt); out_putname: putname(name); return fd; @@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) struct dentry *dentry; struct inode *inode = NULL; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); - mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex, - I_MUTEX_PARENT); - dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); + err = mnt_want_write(mnt); + if (err) + goto out_name; + mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(name, mnt->mnt_root, strlen(name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } - if (!dentry->d_inode) { - err = -ENOENT; - goto out_err; - } - inode = dentry->d_inode; - if (inode) + if (!inode) { + err = -ENOENT; + } else { ihold(inode); - err = mnt_want_write(ipc_ns->mq_mnt); - if (err) - goto out_err; - err = vfs_unlink(dentry->d_parent->d_inode, dentry); - mnt_drop_write(ipc_ns->mq_mnt); -out_err: + err = vfs_unlink(dentry->d_parent->d_inode, dentry); + } dput(dentry); out_unlock: - mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); - putname(name); + mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); if (inode) iput(inode); + mnt_drop_write(mnt); +out_name: + putname(name); return err; } diff --git a/kernel/audit.c b/kernel/audit.c index 4a3f28d2ca65..ea3b7b6191c7 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1456,6 +1456,27 @@ void audit_log_key(struct audit_buffer *ab, char *key) } /** + * audit_log_link_denied - report a link restriction denial + * @operation: specific link operation + * @link: the path that triggered the restriction + */ +void audit_log_link_denied(const char *operation, struct path *link) +{ + struct audit_buffer *ab; + + ab = audit_log_start(current->audit_context, GFP_KERNEL, + AUDIT_ANOM_LINK); + audit_log_format(ab, "op=%s action=denied", operation); + audit_log_format(ab, " pid=%d comm=",
current->pid); + audit_log_untrustedstring(ab, current->comm); + audit_log_d_path(ab, " path=", link); + audit_log_format(ab, " dev="); + audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id); + audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino); + audit_log_end(ab); +} + +/** * audit_log_end - end one audit record * @ab: the audit_buffer * diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 3a5ca582ba1e..ed206fd88cca 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -250,7 +250,6 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); - fsnotify_put_mark(entry); goto out; } @@ -259,7 +258,7 @@ static void untag_chunk(struct node *p) fsnotify_duplicate_mark(&new->mark, entry); if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { - free_chunk(new); + fsnotify_put_mark(&new->mark); goto Fallback; } @@ -293,7 +292,7 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); - fsnotify_put_mark(entry); + fsnotify_put_mark(&new->mark); /* drop initial reference */ goto out; Fallback: @@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) entry = &chunk->mark; if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { - free_chunk(chunk); + fsnotify_put_mark(entry); return -ENOSPC; } @@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) insert_hash(chunk); spin_unlock(&hash_lock); spin_unlock(&entry->lock); + fsnotify_put_mark(entry); /* drop initial reference */ return 0; } @@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) fsnotify_duplicate_mark(chunk_entry, old_entry); if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { spin_unlock(&old_entry->lock); - free_chunk(chunk); + fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; } @@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); fsnotify_destroy_mark(old_entry); + fsnotify_put_mark(chunk_entry); /* drop initial reference */ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ - fsnotify_put_mark(old_entry); /* and kill it */ return 0; } @@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); evict_chunk(chunk); - fsnotify_put_mark(entry); + + /* + * We are guaranteed to have at least one reference to the mark from + * either the inode or the caller of fsnotify_destroy_mark(). 
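Referring back to audit_log_link_denied() above, a hedged sketch of a caller (the real call sites belong to the link-restriction checks in fs/namei.c; the policy test below is a hypothetical placeholder):

static int example_may_linkat(struct path *link)
{
	if (example_hardlink_restricted(link)) {	/* hypothetical policy check */
		audit_log_link_denied("linkat", link);
		return -EPERM;
	}
	return 0;
}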
+ */ + BUG_ON(atomic_read(&entry->refcnt) < 1); } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c index 8b68ce78ff17..be7b33b73d30 100644 --- a/kernel/debug/kdb/kdb_debugger.c +++ b/kernel/debug/kdb/kdb_debugger.c @@ -12,6 +12,7 @@ #include <linux/kdb.h> #include <linux/kdebug.h> #include <linux/export.h> +#include <linux/hardirq.h> #include "kdb_private.h" #include "../debug_core.h" @@ -52,6 +53,9 @@ int kdb_stub(struct kgdb_state *ks) if (atomic_read(&kgdb_setting_breakpoint)) reason = KDB_REASON_KEYBOARD; + if (in_nmi()) + reason = KDB_REASON_NMI; + for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { if ((bp->bp_enabled) && (bp->bp_addr == addr)) { reason = KDB_REASON_BREAK; diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index bb9520f0f6ff..0a69d2adc4f3 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -715,9 +715,6 @@ kdb_printit: /* check for having reached the LINES number of printed lines */ if (kdb_nextline == linecount) { char buf1[16] = ""; -#if defined(CONFIG_SMP) - char buf2[32]; -#endif /* Watch out for recursion here. Any routine that calls * kdb_printf will come back through here. And kdb_read @@ -732,14 +729,6 @@ kdb_printit: if (moreprompt == NULL) moreprompt = "more> "; -#if defined(CONFIG_SMP) - if (strchr(moreprompt, '%')) { - sprintf(buf2, moreprompt, get_cpu()); - put_cpu(); - moreprompt = buf2; - } -#endif - kdb_input_flush(); c = console_drivers; diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 1f91413edb87..31df1706b9a9 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -139,11 +139,10 @@ static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t); static char *__env[] = { #if defined(CONFIG_SMP) "PROMPT=[%d]kdb> ", - "MOREPROMPT=[%d]more> ", #else "PROMPT=kdb> ", - "MOREPROMPT=more> ", #endif + "MOREPROMPT=more> ", "RADIX=16", "MDCOUNT=8", /* lines of md output */ KDB_PLATFORM_ENV, @@ -1236,18 +1235,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, *cmdbuf = '\0'; *(cmd_hist[cmd_head]) = '\0'; - if (KDB_FLAG(ONLY_DO_DUMP)) { - /* kdb is off but a catastrophic error requires a dump. - * Take the dump and reboot. - * Turn on logging so the kdb output appears in the log - * buffer in the dump. - */ - const char *setargs[] = { "set", "LOGGING", "1" }; - kdb_set(2, setargs); - kdb_reboot(0, NULL); - /*NOTREACHED*/ - } - do_full_getstr: #if defined(CONFIG_SMP) snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 6581a040f399..98d4597f43d6 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -153,7 +153,8 @@ put_callchain_entry(int rctx) put_recursion_context(__get_cpu_var(callchain_recursion), rctx); } -struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +struct perf_callchain_entry * +perf_callchain(struct perf_event *event, struct pt_regs *regs) { int rctx; struct perf_callchain_entry *entry; @@ -178,6 +179,12 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) } if (regs) { + /* + * Disallow cross-task user callchains. 
+ */ + if (event->ctx->task && event->ctx->task != current) + goto exit_put; + perf_callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_user(entry, regs); } diff --git a/kernel/events/core.c b/kernel/events/core.c index f1cf0edeb39a..b7935fcec7d9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4039,7 +4039,7 @@ void perf_prepare_sample(struct perf_event_header *header, if (sample_type & PERF_SAMPLE_CALLCHAIN) { int size = 1; - data->callchain = perf_callchain(regs); + data->callchain = perf_callchain(event, regs); if (data->callchain) size += data->callchain->nr; @@ -5209,7 +5209,8 @@ static int perf_tp_event_match(struct perf_event *event, } void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, - struct pt_regs *regs, struct hlist_head *head, int rctx) + struct pt_regs *regs, struct hlist_head *head, int rctx, + struct task_struct *task) { struct perf_sample_data data; struct perf_event *event; @@ -5228,6 +5229,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, perf_swevent_event(event, count, &data, regs); } + /* + * If we got specified a target task, also iterate its context and + * deliver this event there too. + */ + if (task && task != current) { + struct perf_event_context *ctx; + struct trace_entry *entry = record; + + rcu_read_lock(); + ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); + if (!ctx) + goto unlock; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (event->attr.type != PERF_TYPE_TRACEPOINT) + continue; + if (event->attr.config != entry->type) + continue; + if (perf_tp_event_match(event, &data, regs)) + perf_swevent_event(event, count, &data, regs); + } +unlock: + rcu_read_unlock(); + } + perf_swevent_put_recursion_context(rctx); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/events/internal.h b/kernel/events/internal.h index b0b107f90afc..a096c19f2c2a 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -101,7 +101,8 @@ __output_copy(struct perf_output_handle *handle, } /* Callchain handling */ -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); +extern struct perf_callchain_entry * +perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(void); extern void put_callchain_buffers(void); diff --git a/kernel/futex.c b/kernel/futex.c index e2b0fb9a0b3b..3717e7b306e0 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to - * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and - * complete the acquisition of the rt_mutex prior to returning to userspace. - * This ensures the rt_mutex maintains an owner when it has waiters; without - * one, the pi logic wouldn't know which task to boost/deboost, if there was a - * need to. + * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake + * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to + * userspace. This ensures the rt_mutex maintains an owner when it has waiters; + * without one, the pi logic would not know which task to boost/deboost, if + * there was a need to. 
* * We call schedule in futex_wait_queue_me() when we enqueue and return there * via the following: @@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, struct futex_q q = futex_q_init; int res, ret; + if (uaddr == uaddr2) + return -EINVAL; + if (!bitset) return -EINVAL; @@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. */ - WARN_ON(!&q.pi_state); + WARN_ON(!q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); @@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { - if (rt_mutex_owner(pi_mutex) == current) + if (pi_mutex && rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0a8e8f059627..4c69326aa773 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -944,6 +944,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) } /* + * Drivers are often written to work w/o knowledge about the + * underlying irq chip implementation, so a request for a + * threaded irq without a primary hard irq context handler + * requires the ONESHOT flag to be set. Some irq chips like + * MSI based interrupts are per se one shot safe. Check the + * chip flags, so we can avoid the unmask dance at the end of + * the threaded handler for those. + */ + if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) + new->flags &= ~IRQF_ONESHOT; + + /* * The following block of code has to be executed atomically */ raw_spin_lock_irqsave(&desc->lock, flags); @@ -1017,7 +1029,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ new->thread_mask = 1 << ffz(thread_mask); - } else if (new->handler == irq_default_primary_handler) { + } else if (new->handler == irq_default_primary_handler && + !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { /* * The interrupt was requested with handler = NULL, so * we use the default primary handler for it. 
But it diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 1da39ea248fd..c8b7446b27df 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -178,9 +178,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) arch_suspend_enable_irqs(); BUG_ON(irqs_disabled()); - /* Kick the lockup detector */ - lockup_detector_bootcpu_resume(); - Enable_cpus: enable_nonboot_cpus(); diff --git a/kernel/printk.c b/kernel/printk.c index 6a76ab9d4476..66a2ea37b576 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct log *msg = log_from_idx(idx); len += msg_print_text(msg, prev, true, NULL, 0); + prev = msg->flags; idx = log_next(idx); seq++; } @@ -1046,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct log *msg = log_from_idx(idx); len -= msg_print_text(msg, prev, true, NULL, 0); + prev = msg->flags; idx = log_next(idx); seq++; } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d325c4b2dcbb..fbf1fd098dc6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) #endif +static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) +{ + u64 temp = (__force u64) rtime; + + temp *= (__force u64) utime; + + if (sizeof(cputime_t) == 4) + temp = div_u64(temp, (__force u32) total); + else + temp = div64_u64(temp, (__force u64) total); + + return (__force cputime_t) temp; +} + void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { cputime_t rtime, utime = p->utime, total = utime + p->stime; @@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) */ rtime = nsecs_to_cputime(p->se.sum_exec_runtime); - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else + if (total) + utime = scale_utime(utime, rtime, total); + else utime = rtime; /* @@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) total = cputime.utime + cputime.stime; rtime = nsecs_to_cputime(cputime.sum_exec_runtime); - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) cputime.utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else + if (total) + utime = scale_utime(cputime.utime, rtime, total); + else utime = rtime; sig->prev_utime = max(sig->prev_utime, utime); @@ -4340,9 +4346,7 @@ recheck: */ if (unlikely(policy == p->policy && (!rt_policy(policy) || param->sched_priority == p->rt_priority))) { - - __task_rq_unlock(rq); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + task_rq_unlock(rq, p, &flags); return 0; } @@ -7248,6 +7252,7 @@ int in_sched_functions(unsigned long addr) #ifdef CONFIG_CGROUP_SCHED struct task_group root_task_group; +LIST_HEAD(task_groups); #endif DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index d72586fdf660..23aa789c53ee 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -65,8 +65,8 @@ static int convert_prio(int prio) int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) { - int idx = 0; - int task_pri = convert_prio(p->prio); + int idx = 0; + int task_pri = convert_prio(p->prio); if (task_pri >= 
MAX_RT_PRIO) return 0; @@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, */ void cpupri_set(struct cpupri *cp, int cpu, int newpri) { - int *currpri = &cp->cpu_to_pri[cpu]; - int oldpri = *currpri; - int do_mb = 0; + int *currpri = &cp->cpu_to_pri[cpu]; + int oldpri = *currpri; + int do_mb = 0; newpri = convert_prio(newpri); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 22321db64952..c219bf8d704c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3069,6 +3069,9 @@ struct lb_env { int new_dst_cpu; enum cpu_idle_type idle; long imbalance; + /* The set of CPUs under consideration for load-balancing */ + struct cpumask *cpus; + unsigned int flags; unsigned int loop; @@ -3384,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data) static void update_h_load(long cpu) { + struct rq *rq = cpu_rq(cpu); + unsigned long now = jiffies; + + if (rq->h_load_throttle == now) + return; + + rq->h_load_throttle = now; + rcu_read_lock(); walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); rcu_read_unlock(); @@ -3653,8 +3664,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) */ static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, int load_idx, - int local_group, const struct cpumask *cpus, - int *balance, struct sg_lb_stats *sgs) + int local_group, int *balance, struct sg_lb_stats *sgs) { unsigned long nr_running, max_nr_running, min_nr_running; unsigned long load, max_cpu_load, min_cpu_load; @@ -3671,7 +3681,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, max_nr_running = 0; min_nr_running = ~0UL; - for_each_cpu_and(i, sched_group_cpus(group), cpus) { + for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { struct rq *rq = cpu_rq(i); nr_running = rq->nr_running; @@ -3800,8 +3810,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, * @sds: variable to hold the statistics for this sched_domain. */ static inline void update_sd_lb_stats(struct lb_env *env, - const struct cpumask *cpus, - int *balance, struct sd_lb_stats *sds) + int *balance, struct sd_lb_stats *sds) { struct sched_domain *child = env->sd->child; struct sched_group *sg = env->sd->groups; @@ -3818,8 +3827,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg)); memset(&sgs, 0, sizeof(sgs)); - update_sg_lb_stats(env, sg, load_idx, local_group, - cpus, balance, &sgs); + update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs); if (local_group && !(*balance)) return; @@ -4055,7 +4063,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * to restore balance. * * @env: The load balancing environment. - * @cpus: The set of CPUs under consideration for load-balancing. * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. * @@ -4065,7 +4072,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * put to idle by rebalancing its tasks onto our group. */ static struct sched_group * -find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance) +find_busiest_group(struct lb_env *env, int *balance) { struct sd_lb_stats sds; @@ -4075,7 +4082,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance) * Compute the various statistics relavent for load balancing at * this level. 
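scale_utime(), factored out in the kernel/sched/core.c hunk above, replaces an open-coded do_div() (which takes a 32-bit divisor) with a divide whose width matches cputime_t, so scaling utime * rtime / total no longer truncates a 64-bit total. A rough userspace sketch of the same arithmetic using uint64_t throughout; the kernel additionally keeps a 32-bit path for 32-bit cputime_t configurations:

#include <stdint.h>
#include <stdio.h>

/* Scale utime so the reported utime/stime split of rtime keeps the
 * sampled utime:total ratio; same shape as the kernel helper. */
static uint64_t scale_utime(uint64_t utime, uint64_t rtime, uint64_t total)
{
        /* utime * rtime must fit in 64 bits; the kernel helper has the
         * same practical constraint */
        return (utime * rtime) / total;
}

int main(void)
{
        uint64_t utime = 300, stime = 100, rtime = 1000;
        uint64_t total = utime + stime;

        /* the callers keep the total == 0 special case outside the helper */
        uint64_t scaled = total ? scale_utime(utime, rtime, total) : rtime;

        printf("scaled utime = %llu\n", (unsigned long long)scaled);  /* 750 */
        return 0;
}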
*/ - update_sd_lb_stats(env, cpus, balance, &sds); + update_sd_lb_stats(env, balance, &sds); /* * this_cpu is not the appropriate cpu to perform load balancing at @@ -4155,8 +4162,7 @@ ret: * find_busiest_queue - find the busiest runqueue among the cpus in group. */ static struct rq *find_busiest_queue(struct lb_env *env, - struct sched_group *group, - const struct cpumask *cpus) + struct sched_group *group) { struct rq *busiest = NULL, *rq; unsigned long max_load = 0; @@ -4171,7 +4177,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, if (!capacity) capacity = fix_small_capacity(env->sd, group); - if (!cpumask_test_cpu(i, cpus)) + if (!cpumask_test_cpu(i, env->cpus)) continue; rq = cpu_rq(i); @@ -4252,6 +4258,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, .dst_grpmask = sched_group_cpus(sd->groups), .idle = idle, .loop_break = sched_nr_migrate_break, + .cpus = cpus, }; cpumask_copy(cpus, cpu_active_mask); @@ -4260,7 +4267,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, schedstat_inc(sd, lb_count[idle]); redo: - group = find_busiest_group(&env, cpus, balance); + group = find_busiest_group(&env, balance); if (*balance == 0) goto out_balanced; @@ -4270,7 +4277,7 @@ redo: goto out_balanced; } - busiest = find_busiest_queue(&env, group, cpus); + busiest = find_busiest_queue(&env, group); if (!busiest) { schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; @@ -4294,11 +4301,10 @@ redo: env.src_rq = busiest; env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); + update_h_load(env.src_cpu); more_balance: local_irq_save(flags); double_rq_lock(this_rq, busiest); - if (!env.loop) - update_h_load(env.src_cpu); /* * cur_ld_moved - load moved in current iteration diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 573e1ca01102..944cb68420e9 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) const struct cpumask *span; span = sched_rt_period_mask(); +#ifdef CONFIG_RT_GROUP_SCHED + /* + * FIXME: isolated CPUs should really leave the root task group, + * whether they are isolcpus or were isolated via cpusets, lest + * the timer run on a CPU which does not service all runqueues, + * potentially leaving other CPUs indefinitely throttled. If + * isolation is really required, the user will turn the throttle + * off to kill the perturbations it causes anyway. Meanwhile, + * this maintains functionality for boot and/or troubleshooting. 
+ */ + if (rt_b == &root_task_group.rt_bandwidth) + span = cpu_online_mask; +#endif for_each_cpu(i, span) { int enqueue = 0; struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c35a1a7dd4d6..f6714d009e77 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex; struct cfs_rq; struct rt_rq; -static LIST_HEAD(task_groups); +extern struct list_head task_groups; struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH @@ -374,7 +374,11 @@ struct rq { #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this cpu: */ struct list_head leaf_cfs_rq_list; -#endif +#ifdef CONFIG_SMP + unsigned long h_load_throttle; +#endif /* CONFIG_SMP */ +#endif /* CONFIG_FAIR_GROUP_SCHED */ + #ifdef CONFIG_RT_GROUP_SCHED struct list_head leaf_rt_rq_list; #endif diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 7b386e86fd23..da5eb5bed84a 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq) { struct task_struct *stop = rq->stop; - if (stop && stop->on_rq) + if (stop && stop->on_rq) { + stop->se.exec_start = rq->clock_task; return stop; + } return NULL; } @@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq) static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) { + struct task_struct *curr = rq->curr; + u64 delta_exec; + + delta_exec = rq->clock_task - curr->se.exec_start; + if (unlikely((s64)delta_exec < 0)) + delta_exec = 0; + + schedstat_set(curr->se.statistics.exec_max, + max(curr->se.statistics.exec_max, delta_exec)); + + curr->se.sum_exec_runtime += delta_exec; + account_group_exec_runtime(curr, delta_exec); + + curr->se.exec_start = rq->clock_task; + cpuacct_charge(curr, delta_exec); } static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) @@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) static void set_curr_task_stop(struct rq *rq) { + struct task_struct *stop = rq->stop; + + stop->se.exec_start = rq->clock_task; } static void switched_to_stop(struct rq *rq, struct task_struct *p) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6502d35a25ba..87174ef59161 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1498,6 +1498,24 @@ static struct ctl_table fs_table[] = { #endif #endif { + .procname = "protected_symlinks", + .data = &sysctl_protected_symlinks, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { + .procname = "protected_hardlinks", + .data = &sysctl_protected_hardlinks, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { .procname = "suid_dumpable", .data = &suid_dumpable, .maxlen = sizeof(int), diff --git a/kernel/task_work.c b/kernel/task_work.c index 91d4e1742a0c..d320d44903bd 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -75,6 +75,7 @@ void task_work_run(void) p = q->next; q->func(q); q = p; + cond_resched(); } } } diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index a470154e0408..46da0537c10b 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -37,7 +37,7 @@ * requested HZ value. It is also not recommended * for "tick-less" systems. 
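The kernel/sched/stop_task.c hunks above teach the stop class the same runtime bookkeeping the other scheduling classes use: stamp se.exec_start when the task goes on CPU and, when it comes off, charge the delta against the runqueue clock, clamping an unexpected negative delta to zero and tracking the largest single slice. A compact sketch of that pattern with invented names (exec_account, account_start, account_stop):

#include <stdint.h>
#include <stdio.h>

struct exec_account {
        uint64_t exec_start;        /* clock when the task last went on cpu */
        uint64_t sum_exec_runtime;  /* total accounted runtime */
        uint64_t exec_max;          /* longest single slice seen */
};

/* called when the task is scheduled in (pick_next / set_curr) */
static void account_start(struct exec_account *a, uint64_t now)
{
        a->exec_start = now;
}

/* called when the task is scheduled out (put_prev) */
static void account_stop(struct exec_account *a, uint64_t now)
{
        int64_t delta = (int64_t)(now - a->exec_start);

        if (delta < 0)              /* defensively clamp clock weirdness */
                delta = 0;
        if ((uint64_t)delta > a->exec_max)
                a->exec_max = delta;
        a->sum_exec_runtime += delta;
        a->exec_start = now;        /* re-stamp, as put_prev_task_stop() does */
}

int main(void)
{
        struct exec_account a = { 0 };

        account_start(&a, 1000);
        account_stop(&a, 1500);
        printf("sum=%llu max=%llu\n",
               (unsigned long long)a.sum_exec_runtime,
               (unsigned long long)a.exec_max);
        return 0;
}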
*/ -#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) +#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ)) /* Since jiffies uses a simple NSEC_PER_JIFFY multiplier * conversion, the .shift value could be zero. However diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index b7fbadc5c973..24174b4d669b 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -28,7 +28,7 @@ DEFINE_SPINLOCK(ntp_lock); /* USER_HZ period (usecs): */ unsigned long tick_usec = TICK_USEC; -/* ACTHZ period (nsecs): */ +/* SHIFTED_HZ period (nsecs): */ unsigned long tick_nsec; static u64 tick_length; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f045cc50832d..e16af197a2bc 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -65,14 +65,14 @@ struct timekeeper { * used instead. */ struct timespec wall_to_monotonic; - /* time spent in suspend */ - struct timespec total_sleep_time; - /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ - struct timespec raw_time; /* Offset clock monotonic -> clock realtime */ ktime_t offs_real; + /* time spent in suspend */ + struct timespec total_sleep_time; /* Offset clock monotonic -> clock boottime */ ktime_t offs_boot; + /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ + struct timespec raw_time; /* Seqlock for all timekeeper values */ seqlock_t lock; }; @@ -108,13 +108,38 @@ static struct timespec tk_xtime(struct timekeeper *tk) static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) { tk->xtime_sec = ts->tv_sec; - tk->xtime_nsec = ts->tv_nsec << tk->shift; + tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; } static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) { tk->xtime_sec += ts->tv_sec; - tk->xtime_nsec += ts->tv_nsec << tk->shift; + tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; +} + +static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) +{ + struct timespec tmp; + + /* + * Verify consistency of: offset_real = -wall_to_monotonic + * before modifying anything + */ + set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec, + -tk->wall_to_monotonic.tv_nsec); + WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64); + tk->wall_to_monotonic = wtm; + set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); + tk->offs_real = timespec_to_ktime(tmp); +} + +static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) +{ + /* Verify consistency before modifying */ + WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64); + + tk->total_sleep_time = t; + tk->offs_boot = timespec_to_ktime(t); } /** @@ -217,14 +242,6 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) return nsec + arch_gettimeoffset(); } -static void update_rt_offset(struct timekeeper *tk) -{ - struct timespec tmp, *wtm = &tk->wall_to_monotonic; - - set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec); - tk->offs_real = timespec_to_ktime(tmp); -} - /* must hold write on timekeeper.lock */ static void timekeeping_update(struct timekeeper *tk, bool clearntp) { @@ -234,12 +251,10 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp) tk->ntp_error = 0; ntp_clear(); } - update_rt_offset(tk); xt = tk_xtime(tk); update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); } - /** * timekeeping_forward_now - update clock to the current time * @@ -277,18 +292,19 @@ static void timekeeping_forward_now(struct timekeeper *tk) */ void getnstimeofday(struct timespec 
*ts) { + struct timekeeper *tk = &timekeeper; unsigned long seq; s64 nsecs = 0; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); + ts->tv_sec = tk->xtime_sec; + ts->tv_nsec = timekeeping_get_ns(tk); - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts, nsecs); } @@ -296,19 +312,18 @@ EXPORT_SYMBOL(getnstimeofday); ktime_t ktime_get(void) { + struct timekeeper *tk = &timekeeper; unsigned int seq; s64 secs, nsecs; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); - secs = timekeeper.xtime_sec + - timekeeper.wall_to_monotonic.tv_sec; - nsecs = timekeeping_get_ns(&timekeeper) + - timekeeper.wall_to_monotonic.tv_nsec; + seq = read_seqbegin(&tk->lock); + secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); /* * Use ktime_set/ktime_add_ns to create a proper ktime on * 32-bit architectures without CONFIG_KTIME_SCALAR. @@ -327,18 +342,19 @@ EXPORT_SYMBOL_GPL(ktime_get); */ void ktime_get_ts(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; struct timespec tomono; unsigned int seq; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); - tomono = timekeeper.wall_to_monotonic; + seq = read_seqbegin(&tk->lock); + ts->tv_sec = tk->xtime_sec; + ts->tv_nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, ts->tv_nsec + tomono.tv_nsec); @@ -358,22 +374,23 @@ EXPORT_SYMBOL_GPL(ktime_get_ts); */ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) { + struct timekeeper *tk = &timekeeper; unsigned long seq; s64 nsecs_raw, nsecs_real; WARN_ON_ONCE(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - *ts_raw = timekeeper.raw_time; - ts_real->tv_sec = timekeeper.xtime_sec; + *ts_raw = tk->raw_time; + ts_real->tv_sec = tk->xtime_sec; ts_real->tv_nsec = 0; - nsecs_raw = timekeeping_get_ns_raw(&timekeeper); - nsecs_real = timekeeping_get_ns(&timekeeper); + nsecs_raw = timekeeping_get_ns_raw(tk); + nsecs_real = timekeeping_get_ns(tk); - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts_raw, nsecs_raw); timespec_add_ns(ts_real, nsecs_real); @@ -406,28 +423,28 @@ EXPORT_SYMBOL(do_gettimeofday); */ int do_settimeofday(const struct timespec *tv) { + struct timekeeper *tk = &timekeeper; struct timespec ts_delta, xt; unsigned long flags; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); - xt = tk_xtime(&timekeeper); + xt = tk_xtime(tk); ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; - timekeeper.wall_to_monotonic = - timespec_sub(timekeeper.wall_to_monotonic, ts_delta); + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta)); - tk_set_xtime(&timekeeper, tv); + tk_set_xtime(tk, tv); - 
timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); @@ -436,7 +453,6 @@ int do_settimeofday(const struct timespec *tv) } EXPORT_SYMBOL(do_settimeofday); - /** * timekeeping_inject_offset - Adds or subtracts from the current time. * @tv: pointer to the timespec variable containing the offset @@ -445,23 +461,23 @@ EXPORT_SYMBOL(do_settimeofday); */ int timekeeping_inject_offset(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; unsigned long flags; if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); - tk_xtime_add(&timekeeper, ts); - timekeeper.wall_to_monotonic = - timespec_sub(timekeeper.wall_to_monotonic, *ts); + tk_xtime_add(tk, ts); + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); - timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); @@ -477,23 +493,24 @@ EXPORT_SYMBOL(timekeeping_inject_offset); */ static int change_clocksource(void *data) { + struct timekeeper *tk = &timekeeper; struct clocksource *new, *old; unsigned long flags; new = (struct clocksource *) data; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); if (!new->enable || new->enable(new) == 0) { - old = timekeeper.clock; - tk_setup_internals(&timekeeper, new); + old = tk->clock; + tk_setup_internals(tk, new); if (old->disable) old->disable(old); } - timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); return 0; } @@ -507,7 +524,9 @@ static int change_clocksource(void *data) */ void timekeeping_notify(struct clocksource *clock) { - if (timekeeper.clock == clock) + struct timekeeper *tk = &timekeeper; + + if (tk->clock == clock) return; stop_machine(change_clocksource, clock, NULL); tick_clock_notify(); @@ -536,35 +555,36 @@ EXPORT_SYMBOL_GPL(ktime_get_real); */ void getrawmonotonic(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; unsigned long seq; s64 nsecs; do { - seq = read_seqbegin(&timekeeper.lock); - nsecs = timekeeping_get_ns_raw(&timekeeper); - *ts = timekeeper.raw_time; + seq = read_seqbegin(&tk->lock); + nsecs = timekeeping_get_ns_raw(tk); + *ts = tk->raw_time; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts, nsecs); } EXPORT_SYMBOL(getrawmonotonic); - /** * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres */ int timekeeping_valid_for_hres(void) { + struct timekeeper *tk = &timekeeper; unsigned long seq; int ret; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); return ret; } @@ -574,15 +594,16 @@ int timekeeping_valid_for_hres(void) */ u64 timekeeping_max_deferment(void) { + struct timekeeper 
*tk = &timekeeper; unsigned long seq; u64 ret; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - ret = timekeeper.clock->max_idle_ns; + ret = tk->clock->max_idle_ns; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); return ret; } @@ -622,46 +643,43 @@ void __attribute__((weak)) read_boot_clock(struct timespec *ts) */ void __init timekeeping_init(void) { + struct timekeeper *tk = &timekeeper; struct clocksource *clock; unsigned long flags; - struct timespec now, boot; + struct timespec now, boot, tmp; read_persistent_clock(&now); read_boot_clock(&boot); - seqlock_init(&timekeeper.lock); + seqlock_init(&tk->lock); ntp_init(); - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); clock = clocksource_default_clock(); if (clock->enable) clock->enable(clock); - tk_setup_internals(&timekeeper, clock); + tk_setup_internals(tk, clock); - tk_set_xtime(&timekeeper, &now); - timekeeper.raw_time.tv_sec = 0; - timekeeper.raw_time.tv_nsec = 0; + tk_set_xtime(tk, &now); + tk->raw_time.tv_sec = 0; + tk->raw_time.tv_nsec = 0; if (boot.tv_sec == 0 && boot.tv_nsec == 0) - boot = tk_xtime(&timekeeper); - - set_normalized_timespec(&timekeeper.wall_to_monotonic, - -boot.tv_sec, -boot.tv_nsec); - update_rt_offset(&timekeeper); - timekeeper.total_sleep_time.tv_sec = 0; - timekeeper.total_sleep_time.tv_nsec = 0; - write_sequnlock_irqrestore(&timekeeper.lock, flags); + boot = tk_xtime(tk); + + set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec); + tk_set_wall_to_mono(tk, tmp); + + tmp.tv_sec = 0; + tmp.tv_nsec = 0; + tk_set_sleep_time(tk, tmp); + + write_sequnlock_irqrestore(&tk->lock, flags); } /* time in seconds when suspend began */ static struct timespec timekeeping_suspend_time; -static void update_sleep_time(struct timespec t) -{ - timekeeper.total_sleep_time = t; - timekeeper.offs_boot = timespec_to_ktime(t); -} - /** * __timekeeping_inject_sleeptime - Internal function to add sleep interval * @delta: pointer to a timespec delta value @@ -677,13 +695,11 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, "sleep delta value!\n"); return; } - tk_xtime_add(tk, delta); - tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta); - update_sleep_time(timespec_add(tk->total_sleep_time, *delta)); + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); + tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); } - /** * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values * @delta: pointer to a timespec delta value @@ -696,6 +712,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, */ void timekeeping_inject_sleeptime(struct timespec *delta) { + struct timekeeper *tk = &timekeeper; unsigned long flags; struct timespec ts; @@ -704,21 +721,20 @@ void timekeeping_inject_sleeptime(struct timespec *delta) if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) return; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); - __timekeeping_inject_sleeptime(&timekeeper, delta); + __timekeeping_inject_sleeptime(tk, delta); - timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); } - /** * timekeeping_resume - Resumes the generic timekeeping 
subsystem. * @@ -728,6 +744,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta) */ static void timekeeping_resume(void) { + struct timekeeper *tk = &timekeeper; unsigned long flags; struct timespec ts; @@ -735,18 +752,18 @@ static void timekeeping_resume(void) clocksource_resume(); - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); - __timekeeping_inject_sleeptime(&timekeeper, &ts); + __timekeeping_inject_sleeptime(tk, &ts); } /* re-base the last cycle value */ - timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); - timekeeper.ntp_error = 0; + tk->clock->cycle_last = tk->clock->read(tk->clock); + tk->ntp_error = 0; timekeeping_suspended = 0; - timekeeping_update(&timekeeper, false); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + timekeeping_update(tk, false); + write_sequnlock_irqrestore(&tk->lock, flags); touch_softlockup_watchdog(); @@ -758,14 +775,15 @@ static void timekeeping_resume(void) static int timekeeping_suspend(void) { + struct timekeeper *tk = &timekeeper; unsigned long flags; struct timespec delta, delta_delta; static struct timespec old_delta; read_persistent_clock(&timekeeping_suspend_time); - write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); + write_seqlock_irqsave(&tk->lock, flags); + timekeeping_forward_now(tk); timekeeping_suspended = 1; /* @@ -774,7 +792,7 @@ static int timekeeping_suspend(void) * try to compensate so the difference in system time * and persistent_clock time stays close to constant. */ - delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time); + delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time); delta_delta = timespec_sub(delta, old_delta); if (abs(delta_delta.tv_sec) >= 2) { /* @@ -787,7 +805,7 @@ static int timekeeping_suspend(void) timekeeping_suspend_time = timespec_add(timekeeping_suspend_time, delta_delta); } - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); clocksource_suspend(); @@ -898,27 +916,29 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) * the error. This causes the likely below to be unlikely. * * The proper fix is to avoid rounding up by using - * the high precision timekeeper.xtime_nsec instead of + * the high precision tk->xtime_nsec instead of * xtime.tv_nsec everywhere. Fixing this will take some * time. 
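Every timekeeping getter touched in this patch keeps the same read side while switching from the global timekeeper to a local tk pointer: sample the sequence counter with read_seqbegin(), copy the fields, and retry with read_seqretry() if a writer slipped in. A stripped-down, single-writer illustration of that seqcount read loop; the types and helpers here are invented, and the real kernel seqlock also inserts memory barriers and handles the writer's locking:

#include <stdatomic.h>
#include <stdio.h>

struct sample_clock {
        atomic_uint seq;        /* odd while a writer is mid-update */
        long sec;
        long nsec;
};

static unsigned read_begin(const struct sample_clock *c)
{
        unsigned s;

        /* spin past an in-flight writer (odd sequence number) */
        while ((s = atomic_load(&c->seq)) & 1)
                ;
        return s;
}

static int read_retry(const struct sample_clock *c, unsigned start)
{
        /* the kernel's read_seqretry() also issues a read barrier here */
        return atomic_load(&c->seq) != start;
}

int main(void)
{
        struct sample_clock c = { .seq = 0, .sec = 12, .nsec = 34 };
        long sec, nsec;
        unsigned s;

        do {
                s = read_begin(&c);
                sec = c.sec;            /* copy out a consistent snapshot */
                nsec = c.nsec;
        } while (read_retry(&c, s));

        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}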
*/ if (likely(error <= interval)) adj = 1; else - adj = timekeeping_bigadjust(tk, error, &interval, - &offset); - } else if (error < -interval) { - /* See comment above, this is just switched for the negative */ - error >>= 2; - if (likely(error >= -interval)) { - adj = -1; - interval = -interval; - offset = -offset; - } else - adj = timekeeping_bigadjust(tk, error, &interval, - &offset); - } else - return; + adj = timekeeping_bigadjust(tk, error, &interval, &offset); + } else { + if (error < -interval) { + /* See comment above, this is just switched for the negative */ + error >>= 2; + if (likely(error >= -interval)) { + adj = -1; + interval = -interval; + offset = -offset; + } else { + adj = timekeeping_bigadjust(tk, error, &interval, &offset); + } + } else { + goto out_adjust; + } + } if (unlikely(tk->clock->maxadj && (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { @@ -981,6 +1001,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) tk->xtime_nsec -= offset; tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; +out_adjust: /* * It may be possible that when we entered this function, xtime_nsec * was very small. Further, if we're slightly speeding the clocksource @@ -1003,7 +1024,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) } - /** * accumulate_nsecs_to_secs - Accumulates nsecs into secs * @@ -1024,15 +1044,21 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) /* Figure out if its a leap sec and apply if needed */ leap = second_overflow(tk->xtime_sec); - tk->xtime_sec += leap; - tk->wall_to_monotonic.tv_sec -= leap; - if (leap) - clock_was_set_delayed(); + if (unlikely(leap)) { + struct timespec ts; + + tk->xtime_sec += leap; + + ts.tv_sec = leap; + ts.tv_nsec = 0; + tk_set_wall_to_mono(tk, + timespec_sub(tk->wall_to_monotonic, ts)); + clock_was_set_delayed(); + } } } - /** * logarithmic_accumulation - shifted accumulation of cycles * @@ -1076,7 +1102,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, return offset; } - /** * update_wall_time - Uses the current clocksource to increment the wall time * @@ -1084,21 +1109,22 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, static void update_wall_time(void) { struct clocksource *clock; + struct timekeeper *tk = &timekeeper; cycle_t offset; int shift = 0, maxshift; unsigned long flags; s64 remainder; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); /* Make sure we're fully resumed: */ if (unlikely(timekeeping_suspended)) goto out; - clock = timekeeper.clock; + clock = tk->clock; #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET - offset = timekeeper.cycle_interval; + offset = tk->cycle_interval; #else offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #endif @@ -1111,19 +1137,19 @@ static void update_wall_time(void) * chunk in one go, and then try to consume the next smaller * doubled multiple. 
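The update_wall_time() logic above catches up the accumulated cycle offset in the largest power-of-two multiple of the base cycle_interval that fits, then steps the shift down, so a long tickless gap is consumed in logarithmically many passes rather than one interval at a time. A small userspace sketch of that accumulation strategy; the interval and offset values are made up:

#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)) for non-zero v */
static int ilog2_u64(uint64_t v)
{
        int l = -1;

        while (v) {
                v >>= 1;
                l++;
        }
        return l;
}

int main(void)
{
        uint64_t interval = 1000;               /* cycles per base tick */
        uint64_t offset = 37 * interval + 123;  /* cycles to catch up */
        int shift = ilog2_u64(offset) - ilog2_u64(interval);

        if (shift < 0)
                shift = 0;

        while (offset >= interval) {
                uint64_t chunk = interval << shift;

                if (offset >= chunk) {
                        offset -= chunk;        /* accumulate one big chunk */
                        printf("accumulated %llu (shift %d)\n",
                               (unsigned long long)chunk, shift);
                }
                /* otherwise try the next smaller doubled multiple */
                if (offset < (interval << shift) && shift > 0)
                        shift--;
        }
        printf("remainder: %llu\n", (unsigned long long)offset);
        return 0;
}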
*/ - shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); + shift = ilog2(offset) - ilog2(tk->cycle_interval); shift = max(0, shift); /* Bound shift to one less than what overflows tick_length */ maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; shift = min(shift, maxshift); - while (offset >= timekeeper.cycle_interval) { - offset = logarithmic_accumulation(&timekeeper, offset, shift); - if(offset < timekeeper.cycle_interval<<shift) + while (offset >= tk->cycle_interval) { + offset = logarithmic_accumulation(tk, offset, shift); + if (offset < tk->cycle_interval<<shift) shift--; } /* correct the clock when NTP error is too big */ - timekeeping_adjust(&timekeeper, offset); + timekeeping_adjust(tk, offset); /* @@ -1135,21 +1161,21 @@ static void update_wall_time(void) * the vsyscall implementations are converted to use xtime_nsec * (shifted nanoseconds), this can be killed. */ - remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1); - timekeeper.xtime_nsec -= remainder; - timekeeper.xtime_nsec += 1 << timekeeper.shift; - timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift; + remainder = tk->xtime_nsec & ((1 << tk->shift) - 1); + tk->xtime_nsec -= remainder; + tk->xtime_nsec += 1 << tk->shift; + tk->ntp_error += remainder << tk->ntp_error_shift; /* * Finally, make sure that after the rounding * xtime_nsec isn't larger than NSEC_PER_SEC */ - accumulate_nsecs_to_secs(&timekeeper); + accumulate_nsecs_to_secs(tk); - timekeeping_update(&timekeeper, false); + timekeeping_update(tk, false); out: - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); } @@ -1166,18 +1192,18 @@ out: */ void getboottime(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; struct timespec boottime = { - .tv_sec = timekeeper.wall_to_monotonic.tv_sec + - timekeeper.total_sleep_time.tv_sec, - .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec + - timekeeper.total_sleep_time.tv_nsec + .tv_sec = tk->wall_to_monotonic.tv_sec + + tk->total_sleep_time.tv_sec, + .tv_nsec = tk->wall_to_monotonic.tv_nsec + + tk->total_sleep_time.tv_nsec }; set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } EXPORT_SYMBOL_GPL(getboottime); - /** * get_monotonic_boottime - Returns monotonic time since boot * @ts: pointer to the timespec to be set @@ -1189,19 +1215,20 @@ EXPORT_SYMBOL_GPL(getboottime); */ void get_monotonic_boottime(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; struct timespec tomono, sleep; unsigned int seq; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); - tomono = timekeeper.wall_to_monotonic; - sleep = timekeeper.total_sleep_time; + seq = read_seqbegin(&tk->lock); + ts->tv_sec = tk->xtime_sec; + ts->tv_nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; + sleep = tk->total_sleep_time; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); @@ -1231,31 +1258,38 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime); */ void monotonic_to_bootbased(struct timespec *ts) { - *ts = timespec_add(*ts, timekeeper.total_sleep_time); + struct timekeeper *tk = &timekeeper; + + *ts = timespec_add(*ts, tk->total_sleep_time); } EXPORT_SYMBOL_GPL(monotonic_to_bootbased); unsigned long get_seconds(void) { - return timekeeper.xtime_sec; + struct timekeeper *tk = 
&timekeeper; + + return tk->xtime_sec; } EXPORT_SYMBOL(get_seconds); struct timespec __current_kernel_time(void) { - return tk_xtime(&timekeeper); + struct timekeeper *tk = &timekeeper; + + return tk_xtime(tk); } struct timespec current_kernel_time(void) { + struct timekeeper *tk = &timekeeper; struct timespec now; unsigned long seq; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - now = tk_xtime(&timekeeper); - } while (read_seqretry(&timekeeper.lock, seq)); + now = tk_xtime(tk); + } while (read_seqretry(&tk->lock, seq)); return now; } @@ -1263,15 +1297,16 @@ EXPORT_SYMBOL(current_kernel_time); struct timespec get_monotonic_coarse(void) { + struct timekeeper *tk = &timekeeper; struct timespec now, mono; unsigned long seq; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - now = tk_xtime(&timekeeper); - mono = timekeeper.wall_to_monotonic; - } while (read_seqretry(&timekeeper.lock, seq)); + now = tk_xtime(tk); + mono = tk->wall_to_monotonic; + } while (read_seqretry(&tk->lock, seq)); set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, now.tv_nsec + mono.tv_nsec); @@ -1300,14 +1335,15 @@ void do_timer(unsigned long ticks) void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, struct timespec *wtom, struct timespec *sleep) { + struct timekeeper *tk = &timekeeper; unsigned long seq; do { - seq = read_seqbegin(&timekeeper.lock); - *xtim = tk_xtime(&timekeeper); - *wtom = timekeeper.wall_to_monotonic; - *sleep = timekeeper.total_sleep_time; - } while (read_seqretry(&timekeeper.lock, seq)); + seq = read_seqbegin(&tk->lock); + *xtim = tk_xtime(tk); + *wtom = tk->wall_to_monotonic; + *sleep = tk->total_sleep_time; + } while (read_seqretry(&tk->lock, seq)); } #ifdef CONFIG_HIGH_RES_TIMERS @@ -1321,19 +1357,20 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, */ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) { + struct timekeeper *tk = &timekeeper; ktime_t now; unsigned int seq; u64 secs, nsecs; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - secs = timekeeper.xtime_sec; - nsecs = timekeeping_get_ns(&timekeeper); + secs = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); - *offs_real = timekeeper.offs_real; - *offs_boot = timekeeper.offs_boot; - } while (read_seqretry(&timekeeper.lock, seq)); + *offs_real = tk->offs_real; + *offs_boot = tk->offs_boot; + } while (read_seqretry(&tk->lock, seq)); now = ktime_add_ns(ktime_set(secs, 0), nsecs); now = ktime_sub(now, *offs_real); @@ -1346,19 +1383,19 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) */ ktime_t ktime_get_monotonic_offset(void) { + struct timekeeper *tk = &timekeeper; unsigned long seq; struct timespec wtom; do { - seq = read_seqbegin(&timekeeper.lock); - wtom = timekeeper.wall_to_monotonic; - } while (read_seqretry(&timekeeper.lock, seq)); + seq = read_seqbegin(&tk->lock); + wtom = tk->wall_to_monotonic; + } while (read_seqretry(&tk->lock, seq)); return timespec_to_ktime(wtom); } EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); - /** * xtime_update() - advances the timekeeping infrastructure * @ticks: number of ticks, that have elapsed since the last call. diff --git a/kernel/timer.c b/kernel/timer.c index a61c09374eba..8c5e7b908c68 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1407,13 +1407,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds) #endif -#ifndef __alpha__ - -/* - * The Alpha uses getxpid, getxuid, and getxgid instead. 
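tk_set_wall_to_mono() and tk_set_sleep_time(), added earlier in this timekeeping.c patch, funnel every update of wall_to_monotonic and total_sleep_time through one place so the cached ktime_t offsets (offs_real, offs_boot) that ktime_get_update_offsets() hands to the hrtimer code can never drift out of sync with the timespec fields they are derived from. A toy version of that "one setter, one invariant" idea with simplified, invented types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts { int64_t sec, nsec; };

struct tk_like {
        struct ts wall_to_monotonic;  /* timespec form */
        int64_t offs_real;            /* cached form: -wall_to_monotonic, in ns */
};

static int64_t ts_to_ns(struct ts t)
{
        return t.sec * NSEC_PER_SEC + t.nsec;
}

/* single place where both representations change together */
static void tk_set_wall_to_mono(struct tk_like *tk, struct ts wtm)
{
        /* the invariant must already hold before we touch anything */
        assert(tk->offs_real == -ts_to_ns(tk->wall_to_monotonic));

        tk->wall_to_monotonic = wtm;
        tk->offs_real = -ts_to_ns(wtm);
}

int main(void)
{
        struct tk_like tk = { .wall_to_monotonic = { 0, 0 }, .offs_real = 0 };

        tk_set_wall_to_mono(&tk, (struct ts){ -100, 0 });
        printf("offs_real = %lld ns\n", (long long)tk.offs_real);
        return 0;
}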
Maybe this - * should be moved into arch/i386 instead? - */ - /** * sys_getpid - return the thread group id of the current process * @@ -1469,8 +1462,6 @@ SYSCALL_DEFINE0(getegid) return from_kgid_munged(current_user_ns(), current_egid()); } -#endif - static void process_timeout(unsigned long __data) { wake_up_process((struct task_struct *)__data); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index fee3752ae8f6..8a6d2ee2086c 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -281,7 +281,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) head = this_cpu_ptr(event_function.perf_events); perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, - 1, ®s, head); + 1, ®s, head, NULL); #undef ENTRY_SIZE } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b31d3d5699fe..1a2117043bb1 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); + perf_trace_buf_submit(entry, size, rctx, + entry->ip, 1, regs, head, NULL); } /* Kretprobe profile handler */ @@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); - perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); + perf_trace_buf_submit(entry, size, rctx, + entry->ret_ip, 1, regs, head, NULL); } #endif /* CONFIG_PERF_EVENTS */ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 96fc73369099..60e4d7875672 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -532,7 +532,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) (unsigned long *)&rec->args); head = this_cpu_ptr(sys_data->enter_event->perf_events); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); } int perf_sysenter_enable(struct ftrace_event_call *call) @@ -608,7 +608,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) rec->ret = syscall_get_return_value(current, regs); head = this_cpu_ptr(sys_data->exit_event->perf_events); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); } int perf_sysexit_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2b36ac68549e..03003cd7dd96 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); head = this_cpu_ptr(call->perf_events); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL); out: preempt_enable(); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 69add8a9da68..4b1dfba70f7c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -575,7 +575,7 @@ out: /* * Create/destroy watchdog threads as CPUs come and go: */ -static int +static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void 
*hcpu) { int hotcpu = (unsigned long)hcpu; @@ -610,27 +610,10 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_OK; } -static struct notifier_block cpu_nfb = { +static struct notifier_block __cpuinitdata cpu_nfb = { .notifier_call = cpu_callback }; -#ifdef CONFIG_SUSPEND -/* - * On exit from suspend we force an offline->online transition on the boot CPU - * so that the PMU state that was lost while in suspended state gets set up - * properly for the boot CPU. This information is required for restarting the - * NMI watchdog. - */ -void lockup_detector_bootcpu_resume(void) -{ - void *cpu = (void *)(long)smp_processor_id(); - - cpu_callback(&cpu_nfb, CPU_DEAD_FROZEN, cpu); - cpu_callback(&cpu_nfb, CPU_UP_PREPARE_FROZEN, cpu); - cpu_callback(&cpu_nfb, CPU_ONLINE_FROZEN, cpu); -} -#endif - void __init lockup_detector_init(void) { void *cpu = (void *)(long)smp_processor_id(); diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index f8a3f1a829b8..ba6085d9c741 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -12,7 +12,7 @@ #ifdef CONFIG_HOTPLUG_CPU static LIST_HEAD(percpu_counters); -static DEFINE_MUTEX(percpu_counters_lock); +static DEFINE_SPINLOCK(percpu_counters_lock); #endif #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER @@ -123,9 +123,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, #ifdef CONFIG_HOTPLUG_CPU INIT_LIST_HEAD(&fbc->list); - mutex_lock(&percpu_counters_lock); + spin_lock(&percpu_counters_lock); list_add(&fbc->list, &percpu_counters); - mutex_unlock(&percpu_counters_lock); + spin_unlock(&percpu_counters_lock); #endif return 0; } @@ -139,9 +139,9 @@ void percpu_counter_destroy(struct percpu_counter *fbc) debug_percpu_counter_deactivate(fbc); #ifdef CONFIG_HOTPLUG_CPU - mutex_lock(&percpu_counters_lock); + spin_lock(&percpu_counters_lock); list_del(&fbc->list); - mutex_unlock(&percpu_counters_lock); + spin_unlock(&percpu_counters_lock); #endif free_percpu(fbc->counters); fbc->counters = NULL; @@ -170,7 +170,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, return NOTIFY_OK; cpu = (unsigned long)hcpu; - mutex_lock(&percpu_counters_lock); + spin_lock(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, list) { s32 *pcount; unsigned long flags; @@ -181,7 +181,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, *pcount = 0; raw_spin_unlock_irqrestore(&fbc->lock, flags); } - mutex_unlock(&percpu_counters_lock); + spin_unlock(&percpu_counters_lock); #endif return NOTIFY_OK; } diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6b4718e2ee34..b41823cc05e6 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -39,12 +39,6 @@ DEFINE_SPINLOCK(bdi_lock); LIST_HEAD(bdi_list); LIST_HEAD(bdi_pending_list); -static struct task_struct *sync_supers_tsk; -static struct timer_list sync_supers_timer; - -static int bdi_sync_supers(void *); -static void sync_supers_timer_fn(unsigned long); - void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) { if (wb1 < wb2) { @@ -250,12 +244,6 @@ static int __init default_bdi_init(void) { int err; - sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers"); - BUG_ON(IS_ERR(sync_supers_tsk)); - - setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0); - bdi_arm_supers_timer(); - err = bdi_init(&default_backing_dev_info); if (!err) bdi_register(&default_backing_dev_info, NULL, "default"); @@ -270,46 +258,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi) return 
wb_has_dirty_io(&bdi->wb); } -/* - * kupdated() used to do this. We cannot do it from the bdi_forker_thread() - * or we risk deadlocking on ->s_umount. The longer term solution would be - * to implement sync_supers_bdi() or similar and simply do it from the - * bdi writeback thread individually. - */ -static int bdi_sync_supers(void *unused) -{ - set_user_nice(current, 0); - - while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - - /* - * Do this periodically, like kupdated() did before. - */ - sync_supers(); - } - - return 0; -} - -void bdi_arm_supers_timer(void) -{ - unsigned long next; - - if (!dirty_writeback_interval) - return; - - next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies; - mod_timer(&sync_supers_timer, round_jiffies_up(next)); -} - -static void sync_supers_timer_fn(unsigned long unused) -{ - wake_up_process(sync_supers_tsk); - bdi_arm_supers_timer(); -} - static void wakeup_timer_fn(unsigned long data) { struct backing_dev_info *bdi = (struct backing_dev_info *)data; diff --git a/mm/compaction.c b/mm/compaction.c index e78cb9688421..7fcd3a52e68d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -51,6 +51,47 @@ static inline bool migrate_async_suitable(int migratetype) } /* + * Compaction requires the taking of some coarse locks that are potentially + * very heavily contended. Check if the process needs to be scheduled or + * if the lock is contended. For async compaction, back out in the event + * if contention is severe. For sync compaction, schedule. + * + * Returns true if the lock is held. + * Returns false if the lock is released and compaction should abort + */ +static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, + bool locked, struct compact_control *cc) +{ + if (need_resched() || spin_is_contended(lock)) { + if (locked) { + spin_unlock_irqrestore(lock, *flags); + locked = false; + } + + /* async aborts if taking too long or contended */ + if (!cc->sync) { + if (cc->contended) + *cc->contended = true; + return false; + } + + cond_resched(); + if (fatal_signal_pending(current)) + return false; + } + + if (!locked) + spin_lock_irqsave(lock, *flags); + return true; +} + +static inline bool compact_trylock_irqsave(spinlock_t *lock, + unsigned long *flags, struct compact_control *cc) +{ + return compact_checklock_irqsave(lock, flags, false, cc); +} + +/* * Isolate free pages onto a private freelist. Caller must hold zone->lock. 
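compact_checklock_irqsave(), shown in full above, encodes the policy that async compaction backs out of a contended lock while sync compaction reschedules and waits. A loose userspace analogue using a pthread mutex, with pthread_mutex_trylock() standing in for the contention check and sched_yield() for cond_resched(); the irq flags handling and fatal-signal check have no equivalent here:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns true with the lock held, false if an async caller should abort. */
static bool checklock(pthread_mutex_t *lock, bool sync, bool *contended)
{
        if (pthread_mutex_trylock(lock) == 0)
                return true;                    /* uncontended: take it and go on */

        if (!sync) {
                if (contended)
                        *contended = true;      /* report contention to the caller */
                return false;                   /* async: give up rather than spin */
        }

        sched_yield();                          /* sync: be polite, then wait */
        pthread_mutex_lock(lock);
        return true;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        bool contended = false;

        if (checklock(&lock, false, &contended)) {
                puts("lock acquired");
                pthread_mutex_unlock(&lock);
        }
        printf("contended=%d\n", contended);
        return 0;
}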
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free * pages inside of the pageblock (even though it may still end up isolating @@ -173,7 +214,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn) } /* Update the number of anon and file isolated pages in the zone */ -static void acct_isolated(struct zone *zone, struct compact_control *cc) +static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc) { struct page *page; unsigned int count[2] = { 0, }; @@ -181,8 +222,14 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) list_for_each_entry(page, &cc->migratepages, lru) count[!!page_is_file_cache(page)]++; - __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); - __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); + /* If locked we can use the interrupt unsafe versions */ + if (locked) { + __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); + __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); + } else { + mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); + mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); + } } /* Similar to reclaim, but different enough that they don't share logic */ @@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, struct list_head *migratelist = &cc->migratepages; isolate_mode_t mode = 0; struct lruvec *lruvec; + unsigned long flags; + bool locked; /* * Ensure that there are not too many pages isolated from the LRU @@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, /* Time to isolate some pages for migration */ cond_resched(); - spin_lock_irq(&zone->lru_lock); + spin_lock_irqsave(&zone->lru_lock, flags); + locked = true; for (; low_pfn < end_pfn; low_pfn++) { struct page *page; - bool locked = true; /* give a chance to irqs before checking need_resched() */ if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) { - spin_unlock_irq(&zone->lru_lock); + spin_unlock_irqrestore(&zone->lru_lock, flags); locked = false; } - if (need_resched() || spin_is_contended(&zone->lru_lock)) { - if (locked) - spin_unlock_irq(&zone->lru_lock); - cond_resched(); - spin_lock_irq(&zone->lru_lock); - if (fatal_signal_pending(current)) - break; - } else if (!locked) - spin_lock_irq(&zone->lru_lock); + + /* Check if it is ok to still hold the lock */ + locked = compact_checklock_irqsave(&zone->lru_lock, &flags, + locked, cc); + if (!locked) + break; /* * migrate_pfn does not necessarily start aligned to a @@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, } } - acct_isolated(zone, cc); + acct_isolated(zone, locked, cc); - spin_unlock_irq(&zone->lru_lock); + if (locked) + spin_unlock_irqrestore(&zone->lru_lock, flags); trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); @@ -384,6 +431,20 @@ static bool suitable_migration_target(struct page *page) } /* + * Returns the start pfn of the last page block in a zone. This is the starting + * point for full compaction of a zone. Compaction searches for free pages from + * the end of each zone, while isolate_freepages_block scans forward inside each + * page block. + */ +static unsigned long start_free_pfn(struct zone *zone) +{ + unsigned long free_pfn; + free_pfn = zone->zone_start_pfn + zone->spanned_pages; + free_pfn &= ~(pageblock_nr_pages-1); + return free_pfn; +} + +/* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. 
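isolate_migratepages_range() above walks a potentially long PFN range under zone->lru_lock, so it drops the lock every SWAP_CLUSTER_MAX pages and then lets compact_checklock_irqsave() decide whether it may retake it or must abort. The shape of that "hold a hot lock only in bounded bursts" loop, sketched with a pthread mutex and a made-up batch size:

#include <pthread.h>
#include <stdio.h>

#define BATCH 32   /* stand-in for SWAP_CLUSTER_MAX */

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        int processed = 0;

        pthread_mutex_lock(&lock);
        for (int pfn = 0; pfn < 1000; pfn++) {
                /* give others a chance before starting each new batch */
                if (pfn && (pfn % BATCH) == 0) {
                        pthread_mutex_unlock(&lock);
                        /* a real caller could abort here if contended */
                        pthread_mutex_lock(&lock);
                }
                processed++;     /* "isolate" one page */
        }
        pthread_mutex_unlock(&lock);

        printf("processed %d\n", processed);
        return 0;
}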
*/ @@ -422,17 +483,6 @@ static void isolate_freepages(struct zone *zone, pfn -= pageblock_nr_pages) { unsigned long isolated; - /* - * Skip ahead if another thread is compacting in the area - * simultaneously. If we wrapped around, we can only skip - * ahead if zone->compact_cached_free_pfn also wrapped to - * above our starting point. - */ - if (cc->order > 0 && (!cc->wrapped || - zone->compact_cached_free_pfn > - cc->start_free_pfn)) - pfn = min(pfn, zone->compact_cached_free_pfn); - if (!pfn_valid(pfn)) continue; @@ -458,7 +508,16 @@ static void isolate_freepages(struct zone *zone, * are disabled */ isolated = 0; - spin_lock_irqsave(&zone->lock, flags); + + /* + * The zone lock must be held to isolate freepages. This + * unfortunately this is a very coarse lock and can be + * heavily contended if there are parallel allocations + * or parallel compactions. For async compaction do not + * spin on the lock + */ + if (!compact_trylock_irqsave(&zone->lock, &flags, cc)) + break; if (suitable_migration_target(page)) { end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); isolated = isolate_freepages_block(pfn, end_pfn, @@ -474,7 +533,15 @@ static void isolate_freepages(struct zone *zone, */ if (isolated) { high_pfn = max(high_pfn, pfn); - if (cc->order > 0) + + /* + * If the free scanner has wrapped, update + * compact_cached_free_pfn to point to the highest + * pageblock with free pages. This reduces excessive + * scanning of full pageblocks near the end of the + * zone + */ + if (cc->order > 0 && cc->wrapped) zone->compact_cached_free_pfn = high_pfn; } } @@ -484,6 +551,11 @@ static void isolate_freepages(struct zone *zone, cc->free_pfn = high_pfn; cc->nr_freepages = nr_freepages; + + /* If compact_cached_free_pfn is reset then set it now */ + if (cc->order > 0 && !cc->wrapped && + zone->compact_cached_free_pfn == start_free_pfn(zone)) + zone->compact_cached_free_pfn = high_pfn; } /* @@ -570,20 +642,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, return ISOLATE_SUCCESS; } -/* - * Returns the start pfn of the last page block in a zone. This is the starting - * point for full compaction of a zone. Compaction searches for free pages from - * the end of each zone, while isolate_freepages_block scans forward inside each - * page block. 
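start_free_pfn(), whose now-duplicate definition is deleted just below (the function moved earlier in the file), finds the last pageblock boundary of a zone by masking the zone's end PFN down to a multiple of pageblock_nr_pages; the mask trick relies on pageblock_nr_pages being a power of two. A quick worked example of that align-down arithmetic with invented zone numbers:

#include <stdio.h>

int main(void)
{
        unsigned long pageblock_nr_pages = 512;          /* power of two */
        unsigned long zone_start_pfn = 0x1000;
        unsigned long spanned_pages = 0x8765;

        unsigned long end_pfn = zone_start_pfn + spanned_pages;
        unsigned long free_pfn = end_pfn & ~(pageblock_nr_pages - 1);

        /* 0x1000 + 0x8765 = 0x9765; masked down to 0x9600, a 512-page boundary */
        printf("end=0x%lx  start_free_pfn=0x%lx\n", end_pfn, free_pfn);
        return 0;
}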
- */ -static unsigned long start_free_pfn(struct zone *zone) -{ - unsigned long free_pfn; - free_pfn = zone->zone_start_pfn + zone->spanned_pages; - free_pfn &= ~(pageblock_nr_pages-1); - return free_pfn; -} - static int compact_finished(struct zone *zone, struct compact_control *cc) { @@ -771,7 +829,7 @@ out: static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - bool sync) + bool sync, bool *contended) { struct compact_control cc = { .nr_freepages = 0, @@ -780,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, .sync = sync, + .contended = contended, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -801,7 +860,7 @@ int sysctl_extfrag_threshold = 500; */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync) + bool sync, bool *contended) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; @@ -825,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - status = compact_zone_order(zone, order, gfp_mask, sync); + status = compact_zone_order(zone, order, gfp_mask, sync, + contended); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ @@ -861,7 +921,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) if (cc->order > 0) { int ok = zone_watermark_ok(zone, cc->order, low_wmark_pages(zone), 0, 0); - if (ok && cc->order > zone->compact_order_failed) + if (ok && cc->order >= zone->compact_order_failed) zone->compact_order_failed = cc->order + 1; /* Currently async compaction is never deferred. */ else if (!ok && cc->sync) diff --git a/mm/filemap.c b/mm/filemap.c index a4a5260b0279..fa5ca304148e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1712,8 +1712,35 @@ page_not_uptodate: } EXPORT_SYMBOL(filemap_fault); +int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page = vmf->page; + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + int ret = VM_FAULT_LOCKED; + + sb_start_pagefault(inode->i_sb); + file_update_time(vma->vm_file); + lock_page(page); + if (page->mapping != inode->i_mapping) { + unlock_page(page); + ret = VM_FAULT_NOPAGE; + goto out; + } + /* + * We mark the page dirty already here so that when freeze is in + * progress, we are guaranteed that writeback during freezing will + * see the dirty page and writeprotect it again. 
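[Editor's aside] The dirty-early comment above belongs to the new filemap_page_mkwrite() helper; it is part of the freeze-protection rework that, in the filemap and filemap_xip hunks just below, replaces the old vfs_check_frozen() sampling with sb_start_write()/sb_end_write() (and, for faults, sb_start_pagefault()/sb_end_pagefault()) brackets. A rough sketch of the write-side bracket, with a hypothetical function name and the real work and error handling elided:

    #include <linux/fs.h>

    /* Sketch only: everything between sb_start_write() and sb_end_write()
     * runs against an unfrozen superblock; sb_start_write() waits if a
     * freeze is in progress, so no check-then-write race remains. */
    static ssize_t frozen_safe_write(struct file *file, const char __user *buf,
    				 size_t len, loff_t *ppos)
    {
    	struct inode *inode = file->f_mapping->host;
    	ssize_t ret;

    	sb_start_write(inode->i_sb);
    	mutex_lock(&inode->i_mutex);
    	ret = len;		/* placeholder for the real copy into the page cache */
    	mutex_unlock(&inode->i_mutex);
    	sb_end_write(inode->i_sb);

    	return ret;
    }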
+ */ + set_page_dirty(page); +out: + sb_end_pagefault(inode->i_sb); + return ret; +} +EXPORT_SYMBOL(filemap_page_mkwrite); + const struct vm_operations_struct generic_file_vm_ops = { .fault = filemap_fault, + .page_mkwrite = filemap_page_mkwrite, }; /* This is used for a general mmap of a disk file */ @@ -2407,8 +2434,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, count = ocount; pos = *ppos; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; written = 0; @@ -2507,6 +2532,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, BUG_ON(iocb->ki_pos != pos); + sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); blk_start_plug(&plug); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); @@ -2520,6 +2546,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, ret = err; } blk_finish_plug(&plug); + sb_end_write(inode->i_sb); return ret; } EXPORT_SYMBOL(generic_file_aio_write); diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 213ca1f53409..13e013b1270c 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -304,6 +304,7 @@ out: static const struct vm_operations_struct xip_file_vm_ops = { .fault = xip_file_fault, + .page_mkwrite = filemap_page_mkwrite, }; int xip_file_mmap(struct file * file, struct vm_area_struct * vma) @@ -401,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, loff_t pos; ssize_t ret; + sb_start_write(inode->i_sb); + mutex_lock(&inode->i_mutex); if (!access_ok(VERIFY_READ, buf, len)) { @@ -411,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, pos = *ppos; count = len; - vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; @@ -436,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, current->backing_dev_info = NULL; out_up: mutex_unlock(&inode->i_mutex); + sb_end_write(inode->i_sb); return ret; } EXPORT_SYMBOL_GPL(xip_file_write); diff --git a/mm/internal.h b/mm/internal.h index 3314f79d775a..b8c91b342e24 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -130,6 +130,7 @@ struct compact_control { int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; + bool *contended; /* True if a lock was contended */ }; unsigned long diff --git a/mm/memory.c b/mm/memory.c index 482f089765ff..57361708d1a5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2650,6 +2650,9 @@ reuse: if (!page_mkwrite) { wait_on_page_locked(dirty_page); set_page_dirty_balance(dirty_page, page_mkwrite); + /* file_update_time outside page_lock */ + if (vma->vm_file) + file_update_time(vma->vm_file); } put_page(dirty_page); if (page_mkwrite) { @@ -2667,10 +2670,6 @@ reuse: } } - /* file_update_time outside page_lock */ - if (vma->vm_file) - file_update_time(vma->vm_file); - return ret; } @@ -3339,12 +3338,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (dirty_page) { struct address_space *mapping = page->mapping; + int dirtied = 0; if (set_page_dirty(dirty_page)) - page_mkwrite = 1; + dirtied = 1; unlock_page(dirty_page); put_page(dirty_page); - if (page_mkwrite && mapping) { + if ((dirtied || page_mkwrite) && mapping) { /* * Some device drivers do not set page.mapping but still * dirty their pages @@ -3353,7 +3353,7 @@ 
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, } /* file_update_time outside page_lock */ - if (vma->vm_file) + if (vma->vm_file && !page_mkwrite) file_update_time(vma->vm_file); } else { unlock_page(vmf.page); diff --git a/mm/mempool.c b/mm/mempool.c index d9049811f352..54990476c049 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -63,19 +63,21 @@ EXPORT_SYMBOL(mempool_destroy); mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { - return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1); + return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data, + GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_create); mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, int node_id) + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id) { mempool_t *pool; - pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id); + pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); if (!pool) return NULL; pool->elements = kmalloc_node(min_nr * sizeof(void *), - GFP_KERNEL, node_id); + gfp_mask, node_id); if (!pool->elements) { kfree(pool); return NULL; @@ -93,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, while (pool->curr_nr < pool->min_nr) { void *element; - element = pool->alloc(GFP_KERNEL, pool->pool_data); + element = pool->alloc(gfp_mask, pool->pool_data); if (unlikely(!element)) { mempool_destroy(pool); return NULL; diff --git a/mm/mmap.c b/mm/mmap.c index e3e86914f11a..9adee9fc0d8a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2309,7 +2309,7 @@ void exit_mmap(struct mm_struct *mm) } vm_unacct_memory(nr_accounted); - BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } /* Insert vm structure into process list sorted by address diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e5363f34e025..5ad5ce23c1e0 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1532,7 +1532,6 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec(table, write, buffer, length, ppos); - bdi_arm_supers_timer(); return 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 889532b8e6c1..c66fb875104a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1928,6 +1928,17 @@ this_zone_full: zlc_active = 0; goto zonelist_scan; } + + if (page) + /* + * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was + * necessary to allocate the page. The expectation is + * that the caller is taking steps that will free more + * memory. The caller should avoid the page being used + * for !PFMEMALLOC purposes. 
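[Editor's aside] The assignment that follows this comment stores the masked flag test in page->pfmemalloc through the double-negation idiom. A trivial standalone illustration of why the !! is there (the flag value below is made up, not the kernel's):

    #include <stdio.h>

    #define ALLOC_NO_WATERMARKS 0x04	/* illustrative value only */

    int main(void)
    {
    	int alloc_flags = 0x05;

    	/* The raw mask result is 4; !! collapses any non-zero value to
    	 * exactly 1, which is what a bool-like destination expects. */
    	printf("%d %d\n", alloc_flags & ALLOC_NO_WATERMARKS,
    	       !!(alloc_flags & ALLOC_NO_WATERMARKS));	/* prints "4 1" */
    	return 0;
    }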
+ */ + page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); + return page; } @@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, bool sync_migration, - bool *deferred_compaction, + bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { struct page *page; @@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, - nodemask, sync_migration); + nodemask, sync_migration, + contended_compaction); current->flags &= ~PF_MEMALLOC; if (*did_some_progress != COMPACT_SKIPPED) { @@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, bool sync_migration, - bool *deferred_compaction, + bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { return NULL; @@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, unsigned long did_some_progress; bool sync_migration = false; bool deferred_compaction = false; + bool contended_compaction = false; /* * In the slowpath, we sanity check order to avoid ever trying to @@ -2389,14 +2402,6 @@ rebalance: zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); if (page) { - /* - * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was - * necessary to allocate the page. The expectation is - * that the caller is taking steps that will free more - * memory. The caller should avoid the page being used - * for !PFMEMALLOC purposes. - */ - page->pfmemalloc = true; goto got_pg; } } @@ -2422,6 +2427,7 @@ rebalance: nodemask, alloc_flags, preferred_zone, migratetype, sync_migration, + &contended_compaction, &deferred_compaction, &did_some_progress); if (page) @@ -2431,10 +2437,11 @@ rebalance: /* * If compaction is deferred for high-order allocations, it is because * sync compaction recently failed. In this is the case and the caller - * has requested the system not be heavily disrupted, fail the - * allocation now instead of entering direct reclaim + * requested a movable allocation that does not heavily disrupt the + * system then fail the allocation instead of entering direct reclaim. 
*/ - if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD)) + if ((deferred_compaction || contended_compaction) && + (gfp_mask & __GFP_NO_KSWAPD)) goto nopage; /* Try direct reclaim and then allocating */ @@ -2505,6 +2512,7 @@ rebalance: nodemask, alloc_flags, preferred_zone, migratetype, sync_migration, + &contended_compaction, &deferred_compaction, &did_some_progress); if (page) @@ -2569,8 +2577,6 @@ retry_cpuset: page = __alloc_pages_slowpath(gfp_mask, order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); - else - page->pfmemalloc = false; trace_mm_page_alloc(page, order, gfp_mask, migratetype); @@ -4511,7 +4517,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, pg_data_t *pgdat = NODE_DATA(nid); /* pg_data_t should be reset to zero when it's allocated */ - WARN_ON(pgdat->nr_zones || pgdat->node_start_pfn || pgdat->classzone_idx); + WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 73a2a83ee2da..402442402af7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, return rc; } +static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) +{ +#ifdef CONFIG_NET_POLL_CONTROLLER + if (vlan->netpoll) + netpoll_send_skb(vlan->netpoll, skb); +#else + BUG(); +#endif + return NETDEV_TX_OK; +} + static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); unsigned int len; int ret; @@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... 
*/ if (veth->h_vlan_proto != htons(ETH_P_8021Q) || - vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) { + vlan->flags & VLAN_FLAG_REORDER_HDR) { u16 vlan_tci; - vlan_tci = vlan_dev_priv(dev)->vlan_id; + vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } - skb->dev = vlan_dev_priv(dev)->real_dev; + skb->dev = vlan->real_dev; len = skb->len; - if (netpoll_tx_running(dev)) - return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); + if (unlikely(netpoll_tx_running(dev))) + return vlan_netpoll_send_skb(vlan, skb); + ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; - stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats); + stats = this_cpu_ptr(vlan->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); } else { - this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped); + this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped); } return ret; @@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev) return; } -static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) +static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo, + gfp_t gfp) { - struct vlan_dev_priv *info = vlan_dev_priv(dev); - struct net_device *real_dev = info->real_dev; + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; struct netpoll *netpoll; int err = 0; - netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); + netpoll = kzalloc(sizeof(*netpoll), gfp); err = -ENOMEM; if (!netpoll) goto out; - err = __netpoll_setup(netpoll, real_dev); + err = __netpoll_setup(netpoll, real_dev, gfp); if (err) { kfree(netpoll); goto out; } - info->netpoll = netpoll; + vlan->netpoll = netpoll; out: return err; @@ -695,19 +709,15 @@ out: static void vlan_dev_netpoll_cleanup(struct net_device *dev) { - struct vlan_dev_priv *info = vlan_dev_priv(dev); - struct netpoll *netpoll = info->netpoll; + struct vlan_dev_priv *vlan= vlan_dev_priv(dev); + struct netpoll *netpoll = vlan->netpoll; if (!netpoll) return; - info->netpoll = NULL; - - /* Wait for transmitting packets to finish before freeing. 
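[Editor's aside] The cleanup continuing below drops the synchronous synchronize_rcu_bh() wait and hands the object to __netpoll_free_rcu() instead; its implementation in the net/core/netpoll.c hunk further down queues an RCU callback and frees only after the grace period. The general shape of that call_rcu pattern, with hypothetical structure and function names:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Generic deferred-free pattern: embed an rcu_head, queue a callback,
     * and kfree() only once all pre-existing (softirq) readers are done. */
    struct foo {
    	int data;
    	struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
    	struct foo *f = container_of(head, struct foo, rcu);

    	kfree(f);			/* no reader can still see f here */
    }

    static void foo_release(struct foo *f)
    {
    	/* Returns immediately, so it is usable from contexts that must
    	 * not sleep; the free happens after an RCU-bh grace period. */
    	call_rcu_bh(&f->rcu, foo_free_rcu);
    }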
*/ - synchronize_rcu_bh(); + vlan->netpoll = NULL; - __netpoll_cleanup(netpoll); - kfree(netpoll); + __netpoll_free_rcu(netpoll); } #endif /* CONFIG_NET_POLL_CONTROLLER */ diff --git a/net/atm/common.c b/net/atm/common.c index b4b44dbed645..0c0ad930a632 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) return -ENOTCONN; + memset(&pvc, 0, sizeof(pvc)); pvc.sap_family = AF_ATMPVC; pvc.sap_addr.itf = vcc->dev->number; pvc.sap_addr.vpi = vcc->vpi; diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 3a734919c36c..ae0324021407 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr, return -ENOTCONN; *sockaddr_len = sizeof(struct sockaddr_atmpvc); addr = (struct sockaddr_atmpvc *)sockaddr; + memset(addr, 0, sizeof(*addr)); addr->sap_family = AF_ATMPVC; addr->sap_addr.itf = vcc->dev->number; addr->sap_addr.vpi = vcc->vpi; diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index b421cc49d2cd..fc866f2e4528 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv) if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) goto out; - if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect)) - goto out; - curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw) + goto out; + next_gw = batadv_gw_get_best_gw_node(bat_priv); if (curr_gw == next_gw) diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index a438f4b582fc..99dd8f75b3ff 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, del: list_del(&entry->list); kfree(entry); + kfree(tt_change_node); event_removed = true; goto unlock; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 41ff978a33f9..715d7e33fba0 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1365,6 +1365,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev) return false; e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); + if (!e) + return false; + if (hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; return true; @@ -1393,12 +1396,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, return; e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); - if (e) { + /* If the device was not found in a list of found devices names of which + * are pending. 
there is no need to continue resolving a next name as it + * will be done upon receiving another Remote Name Request Complete + * Event */ + if (!e) + return; + + list_del(&e->list); + if (name) { e->name_state = NAME_KNOWN; - list_del(&e->list); - if (name) - mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, - e->data.rssi, name, name_len); + mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, + e->data.rssi, name, name_len); + } else { + e->name_state = NAME_NOT_KNOWN; } if (hci_resolve_next_name(hdev)) @@ -1762,7 +1773,12 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (conn->type == ACL_LINK) { conn->state = BT_CONFIG; hci_conn_hold(conn); - conn->disc_timeout = HCI_DISCONN_TIMEOUT; + + if (!conn->out && !hci_conn_ssp_enabled(conn) && + !hci_find_link_key(hdev, &ev->bdaddr)) + conn->disc_timeout = HCI_PAIRING_TIMEOUT; + else + conn->disc_timeout = HCI_DISCONN_TIMEOUT; } else conn->state = BT_CONNECTED; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index a7f04de03d79..19fdac78e555 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -694,6 +694,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, *addr_len = sizeof(*haddr); haddr->hci_family = AF_BLUETOOTH; haddr->hci_dev = hdev->id; + haddr->hci_channel= 0; release_sock(sk); return 0; @@ -1009,6 +1010,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, { struct hci_filter *f = &hci_pi(sk)->filter; + memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index a8964db04bfb..daa149b7003c 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1181,6 +1181,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) sk = chan->sk; hci_conn_hold(conn->hcon); + conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; bacpy(&bt_sk(sk)->src, conn->src); bacpy(&bt_sk(sk)->dst, conn->dst); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a4bb27e8427e..1497edd191a2 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -245,6 +245,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l BT_DBG("sock %p, sk %p", sock, sk); + memset(la, 0, sizeof(struct sockaddr_l2)); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); @@ -1174,7 +1175,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p chan = l2cap_chan_create(); if (!chan) { - l2cap_sock_kill(sk); + sk_free(sk); return NULL; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7e1e59645c05..1a17850d093c 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -528,6 +528,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int * BT_DBG("sock %p, sk %p", sock, sk); + memset(sa, 0, sizeof(*sa)); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) @@ -822,6 +823,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c } sec.level = rfcomm_pi(sk)->sec_level; + sec.key_size = 0; len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index cb960773c002..56f182393c4c 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -456,7 +456,7 @@ static int 
rfcomm_get_dev_list(void __user *arg) size = sizeof(*dl) + dev_num * sizeof(*di); - dl = kmalloc(size, GFP_KERNEL); + dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 40bbe25dcff7..3589e21edb09 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -131,6 +131,15 @@ static int sco_conn_del(struct hci_conn *hcon, int err) sco_sock_clear_timer(sk); sco_chan_del(sk, err); bh_unlock_sock(sk); + + sco_conn_lock(conn); + conn->sk = NULL; + sco_pi(sk)->conn = NULL; + sco_conn_unlock(conn); + + if (conn->hcon) + hci_conn_put(conn->hcon); + sco_sock_kill(sk); } @@ -821,16 +830,6 @@ static void sco_chan_del(struct sock *sk, int err) BT_DBG("sk %p, conn %p, err %d", sk, conn, err); - if (conn) { - sco_conn_lock(conn); - conn->sk = NULL; - sco_pi(sk)->conn = NULL; - sco_conn_unlock(conn); - - if (conn->hcon) - hci_conn_put(conn->hcon); - } - sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 16ef0dc85a0a..901a616c8083 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -579,8 +579,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) smp = smp_chan_create(conn); + else + smp = conn->smp_chan; - smp = conn->smp_chan; + if (!smp) + return SMP_UNSPECIFIED; smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], req, sizeof(*req)); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 333484537600..070e8a68cfc6 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -31,9 +31,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) struct net_bridge_mdb_entry *mdst; struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); + rcu_read_lock(); #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { br_nf_pre_routing_finish_bridge_slow(skb); + rcu_read_unlock(); return NETDEV_TX_OK; } #endif @@ -48,7 +50,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); - rcu_read_lock(); if (is_broadcast_ether_addr(dest)) br_flood_deliver(br, skb); else if (is_multicast_ether_addr(dest)) { @@ -206,24 +207,23 @@ static void br_poll_controller(struct net_device *br_dev) static void br_netpoll_cleanup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - struct net_bridge_port *p, *n; + struct net_bridge_port *p; - list_for_each_entry_safe(p, n, &br->port_list, list) { + list_for_each_entry(p, &br->port_list, list) br_netpoll_disable(p); - } } -static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, + gfp_t gfp) { struct net_bridge *br = netdev_priv(dev); - struct net_bridge_port *p, *n; + struct net_bridge_port *p; int err = 0; - list_for_each_entry_safe(p, n, &br->port_list, list) { + list_for_each_entry(p, &br->port_list, list) { if (!p->dev) continue; - - err = br_netpoll_enable(p); + err = br_netpoll_enable(p, gfp); if (err) goto fail; } @@ -236,17 +236,17 @@ fail: goto out; } -int br_netpoll_enable(struct net_bridge_port *p) +int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) { struct netpoll *np; int err = 0; - np = kzalloc(sizeof(*p->np), GFP_KERNEL); + np = kzalloc(sizeof(*p->np), gfp); err = -ENOMEM; if (!np) goto out; - err = __netpoll_setup(np, p->dev); + err = 
__netpoll_setup(np, p->dev, gfp); if (err) { kfree(np); goto out; @@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p) p->np = NULL; - /* Wait for transmitting packets to finish before freeing. */ - synchronize_rcu_bh(); - - __netpoll_cleanup(np); - kfree(np); + __netpoll_free_rcu(np); } #endif diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index e9466d412707..02015a505d2a 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { skb->dev = to->dev; - if (unlikely(netpoll_tx_running(to->dev))) { + if (unlikely(netpoll_tx_running(to->br->dev))) { if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) kfree_skb(skb); else { diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e1144e1617be..1c8fdc3558cd 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -361,7 +361,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err2; - if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) + if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL)))) goto err3; err = netdev_set_master(dev, br->dev); @@ -427,6 +427,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) if (!p || p->br != br) return -EINVAL; + /* Since more than one interface can be attached to a bridge, + * there still maybe an alternate path for netconsole to use; + * therefore there is no reason for a NETDEV_RELEASE event. + */ del_nbp(p); spin_lock_bh(&br->lock); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index a768b2408edf..f507d2af9646 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -316,7 +316,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, netpoll_send_skb(np, skb); } -extern int br_netpoll_enable(struct net_bridge_port *p); +extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp); extern void br_netpoll_disable(struct net_bridge_port *p); #else static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) @@ -329,7 +329,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, { } -static inline int br_netpoll_enable(struct net_bridge_port *p) +static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) { return 0; } diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 69771c04ba8f..e597733affb8 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) /* check the version of IP */ ip_version = skb_header_pointer(skb, 0, 1, &buf); + if (!ip_version) { + kfree_skb(skb); + return -EINVAL; + } switch (*ip_version >> 4) { case 4: diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index b780cb7947dd..9da7fdd3cd8a 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) { struct ceph_crypto_key *ckey = key->payload.data; ceph_crypto_key_destroy(ckey); + kfree(ckey); } struct key_type key_type_ceph = { diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h index 1919d1550d75..3572dc518bc9 100644 --- a/net/ceph/crypto.h +++ b/net/ceph/crypto.h @@ -16,7 +16,8 @@ struct ceph_crypto_key { static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) { - kfree(key->key); + if (key) + kfree(key->key); } extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst, diff --git a/net/core/dev.c b/net/core/dev.c index 0cb3fe8d8e72..83988362805e 100644 --- 
a/net/core/dev.c +++ b/net/core/dev.c @@ -1055,6 +1055,8 @@ rollback: */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { + char *new_ifalias; + ASSERT_RTNL(); if (len >= IFALIASZ) @@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return 0; } - dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); - if (!dev->ifalias) + new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); + if (!new_ifalias) return -ENOMEM; + dev->ifalias = new_ifalias; strlcpy(dev->ifalias, alias, len+1); return len; @@ -1639,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb, return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } +static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) +{ + if (ptype->af_packet_priv == NULL) + return false; + + if (ptype->id_match) + return ptype->id_match(ptype, skb->sk); + else if ((struct sock *)ptype->af_packet_priv == skb->sk) + return true; + + return false; +} + /* * Support routine. Sends outgoing frames to any network * taps currently in use. @@ -1656,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) * they originated from - MvS (miquels@drinkel.ow.org) */ if ((ptype->dev == dev || !ptype->dev) && - (ptype->af_packet_priv == NULL || - (struct sock *)ptype->af_packet_priv != skb->sk)) { + (!skb_loop_sk(ptype, skb))) { if (pt_prev) { deliver_skb(skb2, pt_prev, skb->dev); pt_prev = ptype; @@ -2134,6 +2149,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) __be16 protocol = skb->protocol; netdev_features_t features = skb->dev->features; + if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) + features &= ~NETIF_F_GSO_MASK; + if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; @@ -5726,6 +5744,7 @@ EXPORT_SYMBOL(netdev_refcnt_read); /** * netdev_wait_allrefs - wait until all references are gone. + * @dev: target net_device * * This is called when unregistering network devices. * @@ -5986,6 +6005,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); dev->gso_max_size = GSO_MAX_SIZE; + dev->gso_max_segs = GSO_MAX_SEGS; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); diff --git a/net/core/dst.c b/net/core/dst.c index 069d51d29414..56d63612e1e4 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb) } EXPORT_SYMBOL(dst_discard); -const u32 dst_default_metrics[RTAX_MAX]; +const u32 dst_default_metrics[RTAX_MAX + 1] = { + /* This initializer is needed to force linker to place this variable + * into const section. Otherwise it might end into bss section. + * We really want to avoid false sharing on this variable, and catch + * any writes on it. 
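[Editor's aside] The comment above is about ELF section placement: a file-scope const array left without an initializer is a tentative definition and, with a typical GCC/ELF toolchain, can land in writable .bss, while a non-zero initializer pins it into .rodata where stray writes fault. A small compile-and-inspect illustration (check placement with nm or objdump -t); the names are made up:

    /* zero_metrics: no initializer, so despite the const qualifier it is a
     * tentative definition and may be placed in .bss (ordinary RAM). */
    const unsigned int zero_metrics[16];

    /* guarded_metrics: the sentinel element forces a real initializer, so
     * the array is emitted into .rodata and accidental writes trap. */
    const unsigned int guarded_metrics[16 + 1] = {
    	[16] = 0xdeadbeef,
    };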
+ */ + [RTAX_MAX] = 0xdeadbeef, +}; + void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, int initial_obsolete, unsigned short flags) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index b4c90e42b443..346b1eb83a1f 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -26,6 +26,7 @@ #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/if_vlan.h> #include <net/tcp.h> #include <net/udp.h> #include <asm/unaligned.h> @@ -54,7 +55,7 @@ static atomic_t trapped; MAX_UDP_CHUNK) static void zap_completion_queue(void); -static void arp_reply(struct sk_buff *skb); +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo); static unsigned int carrier_timeout = 4; module_param(carrier_timeout, uint, 0644); @@ -167,15 +168,24 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int budget = 16; + WARN_ON_ONCE(!irqs_disabled()); + list_for_each_entry(napi, &dev->napi_list, dev_list) { + local_irq_enable(); if (napi->poll_owner != smp_processor_id() && spin_trylock(&napi->poll_lock)) { - budget = poll_one_napi(dev->npinfo, napi, budget); + rcu_read_lock_bh(); + budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), + napi, budget); + rcu_read_unlock_bh(); spin_unlock(&napi->poll_lock); - if (!budget) + if (!budget) { + local_irq_disable(); break; + } } + local_irq_disable(); } } @@ -185,13 +195,14 @@ static void service_arp_queue(struct netpoll_info *npi) struct sk_buff *skb; while ((skb = skb_dequeue(&npi->arp_tx))) - arp_reply(skb); + netpoll_arp_reply(skb, npi); } } static void netpoll_poll_dev(struct net_device *dev) { const struct net_device_ops *ops; + struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); if (!dev || !netif_running(dev)) return; @@ -206,17 +217,18 @@ static void netpoll_poll_dev(struct net_device *dev) poll_napi(dev); if (dev->flags & IFF_SLAVE) { - if (dev->npinfo) { + if (ni) { struct net_device *bond_dev = dev->master; struct sk_buff *skb; - while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) { + struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); + while ((skb = skb_dequeue(&ni->arp_tx))) { skb->dev = bond_dev; - skb_queue_tail(&bond_dev->npinfo->arp_tx, skb); + skb_queue_tail(&bond_ni->arp_tx, skb); } } } - service_arp_queue(dev->npinfo); + service_arp_queue(ni); zap_completion_queue(); } @@ -302,6 +314,7 @@ static int netpoll_owner_active(struct net_device *dev) return 0; } +/* call with IRQ disabled */ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, struct net_device *dev) { @@ -309,8 +322,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, unsigned long tries; const struct net_device_ops *ops = dev->netdev_ops; /* It is up to the caller to keep npinfo alive. 
*/ - struct netpoll_info *npinfo = np->dev->npinfo; + struct netpoll_info *npinfo; + + WARN_ON_ONCE(!irqs_disabled()); + npinfo = rcu_dereference_bh(np->dev->npinfo); if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { __kfree_skb(skb); return; @@ -319,16 +335,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, /* don't get messages out of order, and no recursion */ if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { struct netdev_queue *txq; - unsigned long flags; txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - local_irq_save(flags); /* try until next clock tick */ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { if (__netif_tx_trylock(txq)) { if (!netif_xmit_stopped(txq)) { + if (vlan_tx_tag_present(skb) && + !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { + skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); + if (unlikely(!skb)) + break; + skb->vlan_tci = 0; + } + status = ops->ndo_start_xmit(skb, dev); if (status == NETDEV_TX_OK) txq_trans_update(txq); @@ -347,10 +369,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, } WARN_ONCE(!irqs_disabled(), - "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n", + "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n", dev->name, ops->ndo_start_xmit); - local_irq_restore(flags); } if (status != NETDEV_TX_OK) { @@ -423,9 +444,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) } EXPORT_SYMBOL(netpoll_send_udp); -static void arp_reply(struct sk_buff *skb) +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo) { - struct netpoll_info *npinfo = skb->dev->npinfo; struct arphdr *arp; unsigned char *arp_ptr; int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; @@ -543,13 +563,12 @@ static void arp_reply(struct sk_buff *skb) spin_unlock_irqrestore(&npinfo->rx_lock, flags); } -int __netpoll_rx(struct sk_buff *skb) +int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) { int proto, len, ulen; int hits = 0; const struct iphdr *iph; struct udphdr *uh; - struct netpoll_info *npinfo = skb->dev->npinfo; struct netpoll *np, *tmp; if (list_empty(&npinfo->rx_np)) @@ -565,6 +584,12 @@ int __netpoll_rx(struct sk_buff *skb) return 1; } + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { + skb = vlan_untag(skb); + if (unlikely(!skb)) + goto out; + } + proto = ntohs(eth_hdr(skb)->h_proto); if (proto != ETH_P_IP) goto out; @@ -715,7 +740,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) } EXPORT_SYMBOL(netpoll_parse_options); -int __netpoll_setup(struct netpoll *np, struct net_device *ndev) +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) { struct netpoll_info *npinfo; const struct net_device_ops *ops; @@ -734,7 +759,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) } if (!ndev->npinfo) { - npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); + npinfo = kmalloc(sizeof(*npinfo), gfp); if (!npinfo) { err = -ENOMEM; goto out; @@ -752,7 +777,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) ops = np->dev->netdev_ops; if (ops->ndo_netpoll_setup) { - err = ops->ndo_netpoll_setup(ndev, npinfo); + err = ops->ndo_netpoll_setup(ndev, npinfo, gfp); if (err) goto free_npinfo; } @@ -857,7 +882,7 @@ int netpoll_setup(struct netpoll *np) refill_skbs(); rtnl_lock(); - err = __netpoll_setup(np, ndev); + err = __netpoll_setup(np, ndev, GFP_KERNEL); rtnl_unlock(); if (err) @@ -878,6 +903,24 @@ static int __init 
netpoll_init(void) } core_initcall(netpoll_init); +static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) +{ + struct netpoll_info *npinfo = + container_of(rcu_head, struct netpoll_info, rcu); + + skb_queue_purge(&npinfo->arp_tx); + skb_queue_purge(&npinfo->txq); + + /* we can't call cancel_delayed_work_sync here, as we are in softirq */ + cancel_delayed_work(&npinfo->tx_work); + + /* clean after last, unfinished work */ + __skb_queue_purge(&npinfo->txq); + /* now cancel it again */ + cancel_delayed_work(&npinfo->tx_work); + kfree(npinfo); +} + void __netpoll_cleanup(struct netpoll *np) { struct netpoll_info *npinfo; @@ -903,20 +946,24 @@ void __netpoll_cleanup(struct netpoll *np) ops->ndo_netpoll_cleanup(np->dev); RCU_INIT_POINTER(np->dev->npinfo, NULL); + call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); + } +} +EXPORT_SYMBOL_GPL(__netpoll_cleanup); - /* avoid racing with NAPI reading npinfo */ - synchronize_rcu_bh(); +static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) +{ + struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); - skb_queue_purge(&npinfo->arp_tx); - skb_queue_purge(&npinfo->txq); - cancel_delayed_work_sync(&npinfo->tx_work); + __netpoll_cleanup(np); + kfree(np); +} - /* clean after last, unfinished work */ - __skb_queue_purge(&npinfo->txq); - kfree(npinfo); - } +void __netpoll_free_rcu(struct netpoll *np) +{ + call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); } -EXPORT_SYMBOL_GPL(__netpoll_cleanup); +EXPORT_SYMBOL_GPL(__netpoll_free_rcu); void netpoll_cleanup(struct netpoll *np) { diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index ed0c0431fcd8..c75e3f9d060f 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev) u32 max_len; struct netprio_map *map; - rtnl_lock(); max_len = atomic_read(&max_prioidx) + 1; map = rtnl_dereference(dev->priomap); if (!map || map->priomap_len < max_len) ret = extend_netdev_table(dev, max_len); - rtnl_unlock(); return ret; } @@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft, if (!dev) goto out_free_devname; + rtnl_lock(); ret = write_update_netdev_table(dev); if (ret < 0) goto out_put_dev; - rcu_read_lock(); - map = rcu_dereference(dev->priomap); + map = rtnl_dereference(dev->priomap); if (map) map->priomap[prioidx] = priority; - rcu_read_unlock(); out_put_dev: + rtnl_unlock(); dev_put(dev); out_free_devname: @@ -277,12 +275,6 @@ out_free_devname: void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) { struct task_struct *p; - char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL); - - if (!tmp) { - pr_warn("Unable to attach cgrp due to alloc failure!\n"); - return; - } cgroup_taskset_for_each(p, cgrp, tset) { unsigned int fd; @@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) continue; } - rcu_read_lock(); + spin_lock(&files->file_lock); fdt = files_fdtable(files); for (fd = 0; fd < fdt->max_fds; fd++) { - char *path; struct file *file; struct socket *sock; - unsigned long s; - int rv, err = 0; + int err; file = fcheck_files(files, fd); if (!file) continue; - path = d_path(&file->f_path, tmp, PAGE_SIZE); - rv = sscanf(path, "socket:[%lu]", &s); - if (rv <= 0) - continue; - sock = sock_from_file(file, &err); - if (!err) + if (sock) sock_update_netprioidx(sock->sk, p); } - rcu_read_unlock(); + spin_unlock(&files->file_lock); task_unlock(p); } - kfree(tmp); } static struct cftype ss_files[] = { diff --git 
a/net/core/scm.c b/net/core/scm.c index 8f6ccfd68ef4..040cebeed45b 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax; i++, cmfptr++) { + struct socket *sock; int new_fd; err = security_file_receive(fp[i]); if (err) @@ -281,6 +282,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) } /* Bump the usage count and install the file. */ get_file(fp[i]); + sock = sock_from_file(fp[i], &err); + if (sock) + sock_update_netprioidx(sock->sk, current); fd_install(new_fd, fp[i]); } diff --git a/net/core/sock.c b/net/core/sock.c index 6b654b3ddfda..8f67ced8d6a8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; + sk->sk_gso_max_segs = dst->dev->gso_max_segs; } } } diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 75c3582a7678..fb85d371a8de 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, u32 __user *optval, int __user *optlen) { int rc = -ENOPROTOOPT; - if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) + if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, optval, optlen); return rc; @@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk, u32 __user *optval, int __user *optlen) { int rc = -ENOPROTOOPT; - if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) + if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, optval, optlen); return rc; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index d65e98798eca..119c04317d48 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -535,6 +535,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, case DCCP_SOCKOPT_CCID_TX_INFO: if (len < sizeof(tfrc)) return -EINVAL; + memset(&tfrc, 0, sizeof(tfrc)); tfrc.tfrctx_x = hc->tx_x; tfrc.tfrctx_x_recv = hc->tx_x_recv; tfrc.tfrctx_x_calc = hc->tx_x_calc; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index f0cdb30921c0..57bd978483e1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head) static inline void free_leaf(struct leaf *l) { - call_rcu_bh(&l->rcu, __leaf_free_rcu); + call_rcu(&l->rcu, __leaf_free_rcu); } static inline void free_leaf_info(struct leaf_info *leaf) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index db0cf17c00f7..7f75f21d7b83 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -404,12 +404,15 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, { const struct inet_request_sock *ireq = inet_rsk(req); struct inet_sock *newinet = inet_sk(newsk); - struct ip_options_rcu *opt = ireq->opt; + struct ip_options_rcu *opt; struct net *net = sock_net(sk); struct flowi4 *fl4; struct rtable *rt; fl4 = &newinet->cork.fl.u.ip4; + + rcu_read_lock(); + opt = rcu_dereference(newinet->inet_opt); flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -421,11 +424,13 @@ struct dst_entry 
*inet_csk_route_child_sock(struct sock *sk, goto no_route; if (opt && opt->opt.is_strictroute && rt->rt_gateway) goto route_err; + rcu_read_unlock(); return &rt->dst; route_err: ip_rt_put(rt); no_route: + rcu_read_unlock(); IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ba39a52d18c1..c196d749daf2 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) neigh = __ipv4_neigh_lookup_noref(dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); - if (neigh) { + if (!IS_ERR(neigh)) { int res = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); @@ -1338,10 +1338,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk, iph->ihl = 5; iph->tos = inet->tos; iph->frag_off = df; - ip_select_ident(iph, &rt->dst, sk); iph->ttl = ttl; iph->protocol = sk->sk_protocol; ip_copy_addrs(iph, fl4); + ip_select_ident(iph, &rt->dst, sk); if (opt) { iph->ihl += opt->optlen>>2; @@ -1366,9 +1366,8 @@ out: return skb; } -int ip_send_skb(struct sk_buff *skb) +int ip_send_skb(struct net *net, struct sk_buff *skb) { - struct net *net = sock_net(skb->sk); int err; err = ip_local_out(skb); @@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) return 0; /* Netfilter gets whole the not fragmented skb. */ - return ip_send_skb(skb); + return ip_send_skb(sock_net(sk), skb); } /* @@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; + skb_orphan(nskb); skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); ip_push_pending_frames(sk, &fl4); } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index ea4a23813d26..4ad9cf173992 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, hdr, NULL, &matchoff, &matchlen, &addr, &port) > 0) { - unsigned int matchend, poff, plen, buflen, n; + unsigned int olen, matchend, poff, plen, buflen, n; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; /* We're only interested in headers related to this @@ -163,17 +163,18 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, goto next; } + olen = *datalen; if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; - matchend = matchoff + matchlen; + matchend = matchoff + matchlen + *datalen - olen; /* The maddr= parameter (RFC 2361) specifies where to send * the reply. */ if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, "maddr=", &poff, &plen, - &addr) > 0 && + &addr, true) > 0 && addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { buflen = sprintf(buffer, "%pI4", @@ -187,7 +188,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, * from which the server received the request. 
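[Editor's aside] The olen bookkeeping in the nf_nat_sip hunk above fixes an off-by-delta problem: map_addr() may substitute an address of a different textual length, shifting everything behind it, so the end of the matched header has to move by the same delta before the maddr= and received= parameters are parsed. A standalone recomputation with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int matchoff = 40, matchlen = 13;	/* "192.168.0.100" */
    	unsigned int datalen = 500;			/* payload size before NAT */
    	unsigned int olen = datalen;
    	unsigned int matchend;

    	/* Pretend the address was rewritten to "10.0.0.1" (8 bytes),
    	 * shrinking the payload by 5 bytes. */
    	datalen -= 5;

    	/* Without the correction, matchoff + matchlen (= 53) would point
    	 * 5 bytes past the end of the rewritten header. */
    	matchend = matchoff + matchlen + datalen - olen;
    	printf("%u\n", matchend);			/* prints 48 */
    	return 0;
    }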
*/ if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, "received=", &poff, &plen, - &addr) > 0 && + &addr, false) > 0 && addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { buflen = sprintf(buffer, "%pI4", diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c035251beb07..fd9ecb52c66b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -70,7 +70,6 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> -#include <linux/bootmem.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> @@ -80,7 +79,6 @@ #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/init.h> -#include <linux/workqueue.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/igmp.h> @@ -88,11 +86,9 @@ #include <linux/mroute.h> #include <linux/netfilter_ipv4.h> #include <linux/random.h> -#include <linux/jhash.h> #include <linux/rcupdate.h> #include <linux/times.h> #include <linux/slab.h> -#include <linux/prefetch.h> #include <net/dst.h> #include <net/net_namespace.h> #include <net/protocol.h> @@ -2032,7 +2028,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) } dev_out = net->loopback_dev; fl4->flowi4_oif = dev_out->ifindex; - res.fi = NULL; flags |= RTCF_LOCAL; goto make_route; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e7e6eeae49c0..2109ff4a1daf 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, old_size_goal + mss_now > xmit_size_goal)) { xmit_size_goal = old_size_goal; } else { - tp->xmit_size_goal_segs = xmit_size_goal / mss_now; + tp->xmit_size_goal_segs = + min_t(u16, xmit_size_goal / mss_now, + sk->sk_gso_max_segs); xmit_size_goal = tp->xmit_size_goal_segs * mss_now; } } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 4d4db16e336e..1432cdb0644c 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && - left * tp->mss_cache < sk->sk_gso_max_size) + left * tp->mss_cache < sk->sk_gso_max_size && + left < sk->sk_gso_max_segs) return true; return left <= tcp_max_tso_deferred_mss(tp); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2fd2bc9e3c64..85308b90df80 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); + if (unlikely(sk->sk_rx_dst == NULL)) + inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. 
* The code loosely follows the one in the famous @@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) tcp_set_state(sk, TCP_ESTABLISHED); if (skb != NULL) { - inet_sk_rx_dst_set(sk, skb); + icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 42b2a6a73092..00a748d14062 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -417,10 +417,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ tp->mtu_info = info; - if (!sock_owned_by_user(sk)) + if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); - else - set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); + } else { + if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) + sock_hold(sk); + } goto out; } @@ -1462,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, goto exit_nonewsk; newsk->sk_gso_type = SKB_GSO_TCPV4; + inet_sk_rx_dst_set(newsk, skb); newtp = tcp_sk(newsk); newinet = inet_sk(newsk); @@ -1627,9 +1630,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_rx_dst = NULL; } } - if (unlikely(sk->sk_rx_dst == NULL)) - inet_sk_rx_dst_set(sk, skb); - if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; goto reset; @@ -1872,10 +1872,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_destructor= tcp_twsk_destructor, }; +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; +} +EXPORT_SYMBOL(inet_sk_rx_dst_set); + const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, + .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v4_conn_request, .syn_recv_sock = tcp_v4_syn_recv_sock, .net_header_len = sizeof(struct iphdr), diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 2288a6399e1e..0abe67bb4d3a 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net) static void __net_exit tcp_net_metrics_exit(struct net *net) { + unsigned int i; + + for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) { + struct tcp_metrics_block *tm, *next; + + tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1); + while (tm) { + next = rcu_dereference_protected(tm->tcpm_next, 1); + kfree(tm); + tm = next; + } + } kfree(net->ipv4.tcp_metrics_hash); } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 232a90c3ec86..6ff7f10dce9d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -387,8 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct tcp_sock *oldtp = tcp_sk(sk); struct tcp_cookie_values *oldcvp = oldtp->cookie_values; - inet_sk_rx_dst_set(newsk, skb); - /* TCP Cookie Transactions require space for the cookie pair, * as it differs for each connection. There is no need to * copy any s_data_payload stored at the original socket. 
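[Editor's aside] The ICMP handler above now takes a socket reference when it defers the MTU update (test_and_set_bit() followed by sock_hold()), and the tcp_output.c and tcp_timer.c hunks below add the matching __sock_put() in tcp_release_cb() plus the same guarded hold in the delack and write timers. A toy userspace model of that defer-with-reference pairing; the names and C11 atomics are stand-ins, not the kernel primitives:

    #include <stdatomic.h>
    #include <stdio.h>

    #define WORK_DEFERRED 0x1u

    static atomic_uint flags;
    static atomic_uint refcnt = 1;		/* the object's base reference */

    static void defer_work(void)
    {
    	unsigned int old = atomic_fetch_or(&flags, WORK_DEFERRED);

    	if (!(old & WORK_DEFERRED))	/* first to defer takes one hold */
    		atomic_fetch_add(&refcnt, 1);
    }

    static void release_cb(void)
    {
    	unsigned int old = atomic_exchange(&flags, 0);

    	if (old & WORK_DEFERRED) {
    		/* ... run the deferred handler here ... */
    		atomic_fetch_sub(&refcnt, 1);	/* pairs with the hold */
    	}
    }

    int main(void)
    {
    	defer_work();
    	defer_work();			/* second call must not re-hold */
    	release_cb();
    	printf("%u\n", atomic_load(&refcnt));	/* prints 1 */
    	return 0;
    }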
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3f1bcff0b10b..d04632673a9e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk) if (flags & (1UL << TCP_TSQ_DEFERRED)) tcp_tsq_handler(sk); - if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) + if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { tcp_write_timer_handler(sk); - - if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) + __sock_put(sk); + } + if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { tcp_delack_timer_handler(sk); - - if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) + __sock_put(sk); + } + if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { sk->sk_prot->mtu_reduced(sk); + __sock_put(sk); + } } EXPORT_SYMBOL(tcp_release_cb); @@ -940,7 +944,7 @@ void __init tcp_tasklet_init(void) * We cant xmit new skbs from this context, as we might already * hold qdisc lock. */ -void tcp_wfree(struct sk_buff *skb) +static void tcp_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); @@ -1522,21 +1526,21 @@ static void tcp_cwnd_validate(struct sock *sk) * when we would be allowed to send the split-due-to-Nagle skb fully. */ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, - unsigned int mss_now, unsigned int cwnd) + unsigned int mss_now, unsigned int max_segs) { const struct tcp_sock *tp = tcp_sk(sk); - u32 needed, window, cwnd_len; + u32 needed, window, max_len; window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; - cwnd_len = mss_now * cwnd; + max_len = mss_now * max_segs; - if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) - return cwnd_len; + if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) + return max_len; needed = min(skb->len, window); - if (cwnd_len <= needed) - return cwnd_len; + if (max_len <= needed) + return max_len; return needed - needed % mss_now; } @@ -1765,7 +1769,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. */ - if (limit >= sk->sk_gso_max_size) + if (limit >= min_t(unsigned int, sk->sk_gso_max_size, + sk->sk_gso_max_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? 
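[Editor's aside] In the hunks here and just below, tcp_mss_split_point() takes a maximum segment count rather than the raw congestion window, and tcp_tso_should_defer()/tcp_write_xmit() clamp that count with sk->sk_gso_max_segs so a device's segment limit is honoured. A standalone recomputation of the split-point arithmetic with made-up numbers (the last-skb-in-queue special case is dropped for brevity):

    #include <stdio.h>

    /* Mirror of the tcp_mss_split_point() arithmetic in the hunk above. */
    static unsigned int mss_split_point(unsigned int skb_len, unsigned int window,
    				    unsigned int mss_now, unsigned int max_segs)
    {
    	unsigned int max_len = mss_now * max_segs;
    	unsigned int needed;

    	if (max_len <= window)
    		return max_len;

    	needed = skb_len < window ? skb_len : window;
    	if (max_len <= needed)
    		return max_len;

    	/* Only whole segments go out: trim to a multiple of mss_now. */
    	return needed - needed % mss_now;
    }

    int main(void)
    {
    	/* 1448-byte MSS, 64-segment device limit, 100000-byte send window */
    	printf("%u\n", mss_split_point(200000, 100000, 1448, 64));
    	/* prints 92672 = 1448 * 64: capped by max_segs, not by the window */
    	return 0;
    }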
*/ @@ -1999,7 +2004,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, limit = mss_now; if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, - cwnd_quota); + min_t(unsigned int, + cwnd_quota, + sk->sk_gso_max_segs)); if (skb->len > limit && unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 6df36ad55a38..b774a03bd1dc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -252,7 +252,8 @@ static void tcp_delack_timer(unsigned long data) inet_csk(sk)->icsk_ack.blocked = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); /* deleguate our work to tcp_release_cb() */ - set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); + if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) + sock_hold(sk); } bh_unlock_sock(sk); sock_put(sk); @@ -481,7 +482,8 @@ static void tcp_write_timer(unsigned long data) tcp_write_timer_handler(sk); } else { /* deleguate our work to tcp_release_cb() */ - set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); + if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) + sock_hold(sk); } bh_unlock_sock(sk); sock_put(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b4c3582a991f..6f6d1aca3c3d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) uh->check = CSUM_MANGLED_0; send: - err = ip_send_skb(skb); + err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS_USER(sock_net(sk), diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 79181819a24f..6bc85f7c31e3 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -494,8 +494,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf) struct net_device *dev; struct inet6_dev *idev; - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { idev = __in6_dev_get(dev); if (idev) { int changed = (!idev->cnf.forwarding) ^ (!newf); @@ -504,7 +503,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf) dev_forward_change(idev); } } - rcu_read_unlock(); } static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index da2e92d05c15..745a32042950 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -307,10 +307,10 @@ static int __net_init ipv6_proc_init_net(struct net *net) goto proc_dev_snmp6_fail; return 0; +proc_dev_snmp6_fail: + proc_net_remove(net, "snmp6"); proc_snmp6_fail: proc_net_remove(net, "sockstat6"); -proc_dev_snmp6_fail: - proc_net_remove(net, "dev_snmp6"); return -ENOMEM; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c66b90f71c9b..a3e60cc04a8a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -94,6 +94,18 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, } #endif +static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + const struct rt6_info *rt = (const struct rt6_info *)dst; + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (rt->rt6i_node) + inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; +} + static void tcp_v6_hash(struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { @@ -1270,6 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newsk->sk_gso_type = 
SKB_GSO_TCPV6; __ip6_dst_store(newsk, dst, NULL, NULL); + inet6_sk_rx_dst_set(newsk, skb); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; @@ -1447,7 +1460,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ + struct dst_entry *dst = sk->sk_rx_dst; + sock_rps_save_rxhash(sk, skb); + if (dst) { + if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || + dst->ops->check(dst, np->rx_dst_cookie) == NULL) { + dst_release(dst); + sk->sk_rx_dst = NULL; + } + } + if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) goto reset; if (opt_skb) @@ -1705,9 +1728,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; struct inet_sock *icsk = inet_sk(sk); if (dst) - dst = dst_check(dst, 0); + dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); if (dst && - icsk->rx_dst_ifindex == inet6_iif(skb)) + icsk->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } @@ -1723,6 +1746,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, + .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), @@ -1754,6 +1778,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, + .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct iphdr), diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ef39812107b1..f8c4c08ffb60 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -73,6 +73,13 @@ static int xfrm6_get_tos(const struct flowi *fl) return 0; } +static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst) +{ + struct rt6_info *rt = (struct rt6_info *)xdst; + + rt6_init_peer(rt, net->ipv6.peers); +} + static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { @@ -286,6 +293,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .get_saddr = xfrm6_get_saddr, .decode_session = _decode_session6, .get_tos = xfrm6_get_tos, + .init_dst = xfrm6_init_dst, .init_path = xfrm6_init_path, .fill_dst = xfrm6_fill_dst, .blackhole_route = ip6_blackhole_route, diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 35e1e4bde587..927547171bc7 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, lsa->l2tp_family = AF_INET6; lsa->l2tp_flowinfo = 0; lsa->l2tp_scope_id = 0; + lsa->l2tp_unused = 0; if (peer) { if (!lsk->peer_conn_id) return -ENOTCONN; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index f6fe4d400502..c2190005a114 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr, struct sockaddr_llc sllc; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); - int rc = 0; + int rc = -EBADF; memset(&sllc, 0, sizeof(sllc)); lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) goto out; *uaddrlen = sizeof(sllc); - memset(uaddr, 0, *uaddrlen); if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) @@ -1206,7 +1205,7 @@ static int 
__init llc2_init(void) rc = llc_proc_init(); if (rc != 0) { printk(llc_proc_err_msg); - goto out_unregister_llc_proto; + goto out_station; } rc = llc_sysctl_init(); if (rc) { @@ -1226,7 +1225,8 @@ out_sysctl: llc_sysctl_exit(); out_proc: llc_proc_exit(); -out_unregister_llc_proto: +out_station: + llc_station_exit(); proto_unregister(&llc_proto); goto out; } diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index e32cab44ea95..dd3e83328ad5 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -42,6 +42,7 @@ static void (*llc_type_handlers[2])(struct llc_sap *sap, void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, struct sk_buff *skb)) { + smp_wmb(); /* ensure initialisation is complete before it's called */ if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) llc_type_handlers[type - 1] = handler; } @@ -50,11 +51,19 @@ void llc_remove_pack(int type) { if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) llc_type_handlers[type - 1] = NULL; + synchronize_net(); } void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) { + /* Ensure initialisation is complete before it's called */ + if (handler) + smp_wmb(); + llc_station_handler = handler; + + if (!handler) + synchronize_net(); } /** @@ -150,6 +159,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, int dest; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*sta_handler)(struct sk_buff *skb); + void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb); if (!net_eq(dev_net(dev), &init_net)) goto drop; @@ -182,7 +193,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, */ rcv = rcu_dereference(sap->rcv_func); dest = llc_pdu_type(skb); - if (unlikely(!dest || !llc_type_handlers[dest - 1])) { + sap_handler = dest ? 
ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL; + if (unlikely(!sap_handler)) { if (rcv) rcv(skb, dev, pt, orig_dev); else @@ -193,7 +205,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, if (cskb) rcv(cskb, dev, pt, orig_dev); } - llc_type_handlers[dest - 1](sap, skb); + sap_handler(sap, skb); } llc_sap_put(sap); out: @@ -202,9 +214,10 @@ drop: kfree_skb(skb); goto out; handle_station: - if (!llc_station_handler) + sta_handler = ACCESS_ONCE(llc_station_handler); + if (!sta_handler) goto drop; - llc_station_handler(skb); + sta_handler(skb); goto out; } diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 39a8d8924b9c..b2f2bac2c2a2 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -687,12 +687,8 @@ static void llc_station_rcv(struct sk_buff *skb) llc_station_state_process(skb); } -int __init llc_station_init(void) +void __init llc_station_init(void) { - int rc = -ENOBUFS; - struct sk_buff *skb; - struct llc_station_state_ev *ev; - skb_queue_head_init(&llc_main_station.mac_pdu_q); skb_queue_head_init(&llc_main_station.ev_q.list); spin_lock_init(&llc_main_station.ev_q.lock); @@ -700,23 +696,12 @@ int __init llc_station_init(void) (unsigned long)&llc_main_station); llc_main_station.ack_timer.expires = jiffies + sysctl_llc_station_ack_timeout; - skb = alloc_skb(0, GFP_ATOMIC); - if (!skb) - goto out; - rc = 0; - llc_set_station_handler(llc_station_rcv); - ev = llc_station_ev(skb); - memset(ev, 0, sizeof(*ev)); llc_main_station.maximum_retry = 1; - llc_main_station.state = LLC_STATION_STATE_DOWN; - ev->type = LLC_STATION_EV_TYPE_SIMPLE; - ev->prim_type = LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK; - rc = llc_station_next_state(skb); -out: - return rc; + llc_main_station.state = LLC_STATION_STATE_UP; + llc_set_station_handler(llc_station_rcv); } -void __exit llc_station_exit(void) +void llc_station_exit(void) { llc_set_station_handler(NULL); } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6fac18c0423f..85572353a7e3 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) del_timer_sync(&sdata->u.mesh.housekeeping_timer); del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); + del_timer_sync(&sdata->u.mesh.mesh_path_timer); /* * If the timer fired while we waited for it, it will have * requeued the work. 
Now the work will be running again @@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) local->fif_other_bss--; atomic_dec(&local->iff_allmultis); ieee80211_configure_filter(local); + + sdata->u.mesh.timers_running = 0; } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index cef0c9e79aba..a4a5acdbaa4d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.bcn_mon_timer); del_timer_sync(&sdata->u.mgd.timer); del_timer_sync(&sdata->u.mgd.chswitch_timer); + + sdata->u.mgd.timers_running = 0; } void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index bcaee5d12839..839dd9737989 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, if (local->scan_req != local->int_scan_req) cfg80211_scan_done(local->scan_req, aborted); local->scan_req = NULL; - local->scan_sdata = NULL; + rcu_assign_pointer(local->scan_sdata, NULL); local->scanning = 0; local->scan_channel = NULL; @@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) kfree(local->sched_scan_ies.ie[i]); drv_sched_scan_stop(local, sdata); - rcu_assign_pointer(local->sched_scan_sdata, NULL); } out: mutex_unlock(&local->mtx); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 84444dda194b..72bf32a84874 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2759,6 +2759,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { struct ip_vs_timeout_user t; + memset(&t, 0, sizeof(t)); __ip_vs_get_timeouts(net, &t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 45cf602a76bc..527651a53a45 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master, } } -static inline int refresh_timer(struct nf_conntrack_expect *i) -{ - struct nf_conn_help *master_help = nfct_help(i->master); - const struct nf_conntrack_expect_policy *p; - - if (!del_timer(&i->timeout)) - return 0; - - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[i->class]; - i->timeout.expires = jiffies + p->timeout * HZ; - add_timer(&i->timeout); - return 1; -} - static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) { const struct nf_conntrack_expect_policy *p; @@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) struct nf_conn_help *master_help = nfct_help(master); struct nf_conntrack_helper *helper; struct net *net = nf_ct_exp_net(expect); - struct hlist_node *n; + struct hlist_node *n, *next; unsigned int h; int ret = 1; @@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) goto out; } h = nf_ct_expect_dst_hash(&expect->tuple); - hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { if (expect_matches(i, expect)) { - /* Refresh timer: if it's dying, ignore.. 
*/ - if (refresh_timer(i)) { - ret = 0; - goto out; + if (del_timer(&i->timeout)) { + nf_ct_unlink_expect(i); + nf_ct_expect_put(i); + break; } } else if (expect_clash(i, expect)) { ret = -EBUSY; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 14f67a2cbcb5..da4fc37a8578 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1896,10 +1896,15 @@ static int ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct) { struct nlattr *cda[CTA_MAX+1]; + int ret; nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); - return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); + spin_lock_bh(&nf_conntrack_lock); + ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); + spin_unlock_bh(&nf_conntrack_lock); + + return ret; } static struct nfq_ct_hook ctnetlink_nfqueue_hook = { diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 758a1bacc126..5c0a112aeee6 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -183,12 +183,12 @@ static int media_len(const struct nf_conn *ct, const char *dptr, return len + digits_len(ct, dptr, limit, shift); } -static int parse_addr(const struct nf_conn *ct, const char *cp, - const char **endp, union nf_inet_addr *addr, - const char *limit) +static int sip_parse_addr(const struct nf_conn *ct, const char *cp, + const char **endp, union nf_inet_addr *addr, + const char *limit, bool delim) { const char *end; - int ret = 0; + int ret; if (!ct) return 0; @@ -197,16 +197,28 @@ static int parse_addr(const struct nf_conn *ct, const char *cp, switch (nf_ct_l3num(ct)) { case AF_INET: ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); + if (ret == 0) + return 0; break; case AF_INET6: + if (cp < limit && *cp == '[') + cp++; + else if (delim) + return 0; + ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); + if (ret == 0) + return 0; + + if (end < limit && *end == ']') + end++; + else if (delim) + return 0; break; default: BUG(); } - if (ret == 0 || end == cp) - return 0; if (endp) *endp = end; return 1; @@ -219,7 +231,7 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr, union nf_inet_addr addr; const char *aux = dptr; - if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { + if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) { pr_debug("ip: %s parse failed.!\n", dptr); return 0; } @@ -296,7 +308,7 @@ int ct_sip_parse_request(const struct nf_conn *ct, return 0; dptr += shift; - if (!parse_addr(ct, dptr, &end, addr, limit)) + if (!sip_parse_addr(ct, dptr, &end, addr, limit, true)) return -1; if (end < limit && *end == ':') { end++; @@ -550,7 +562,7 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, if (ret == 0) return ret; - if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) + if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true)) return -1; if (*c == ':') { c++; @@ -599,7 +611,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, const char *name, unsigned int *matchoff, unsigned int *matchlen, - union nf_inet_addr *addr) + union nf_inet_addr *addr, bool delim) { const char *limit = dptr + datalen; const char *start, *end; @@ -613,7 +625,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, return 0; start += strlen(name); - if (!parse_addr(ct, start, &end, addr, limit)) + if (!sip_parse_addr(ct, start, &end, addr, limit, 
delim)) return 0; *matchoff = start - dptr; *matchlen = end - start; @@ -675,6 +687,47 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, return 1; } +static int sdp_parse_addr(const struct nf_conn *ct, const char *cp, + const char **endp, union nf_inet_addr *addr, + const char *limit) +{ + const char *end; + int ret; + + memset(addr, 0, sizeof(*addr)); + switch (nf_ct_l3num(ct)) { + case AF_INET: + ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); + break; + case AF_INET6: + ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); + break; + default: + BUG(); + } + + if (ret == 0) + return 0; + if (endp) + *endp = end; + return 1; +} + +/* skip ip address. returns its length. */ +static int sdp_addr_len(const struct nf_conn *ct, const char *dptr, + const char *limit, int *shift) +{ + union nf_inet_addr addr; + const char *aux = dptr; + + if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) { + pr_debug("ip: %s parse failed.!\n", dptr); + return 0; + } + + return dptr - aux; +} + /* SDP header parsing: a SDP session description contains an ordered set of * headers, starting with a section containing general session parameters, * optionally followed by multiple media descriptions. @@ -686,10 +739,10 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, */ static const struct sip_header ct_sdp_hdrs[] = { [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), - [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len), - [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len), - [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len), - [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len), + [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len), + [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len), + [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len), + [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len), [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), }; @@ -775,8 +828,8 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, if (ret <= 0) return ret; - if (!parse_addr(ct, dptr + *matchoff, NULL, addr, - dptr + *matchoff + *matchlen)) + if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr, + dptr + *matchoff + *matchlen)) return -1; return 1; } @@ -1515,7 +1568,6 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, } static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; -static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly; static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { [SIP_EXPECT_SIGNALLING] = { @@ -1585,9 +1637,9 @@ static int __init nf_conntrack_sip_init(void) sip[i][j].me = THIS_MODULE; if (ports[i] == SIP_PORT) - sprintf(sip_names[i][j], "sip"); + sprintf(sip[i][j].name, "sip"); else - sprintf(sip_names[i][j], "sip-%u", i); + sprintf(sip[i][j].name, "sip-%u", i); pr_debug("port #%u: %u\n", i, ports[i]); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5463969da45b..1445d73533ed 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1362,7 +1362,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &scm; - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ceaca7c134a0..aee7196aac36 100644 --- a/net/packet/af_packet.c +++ 
b/net/packet/af_packet.c @@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po, default: WARN(1, "TPACKET version not supported\n"); BUG(); - return 0; + return NULL; } } @@ -1273,6 +1273,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) spin_unlock(&f->lock); } +bool match_fanout_group(struct packet_type *ptype, struct sock * sk) +{ + if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) + return true; + + return false; +} + static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_sock *po = pkt_sk(sk); @@ -1325,6 +1333,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; + match->prot_hook.id_match = match_fanout_group; dev_add_pack(&match->prot_hook); list_add(&match->list, &fanout_list); } @@ -1936,7 +1945,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb) if (likely(po->tx_ring.pg_vec)) { ph = skb_shinfo(skb)->destructor_arg; - BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); BUG_ON(atomic_read(&po->tx_ring.pending) == 0); atomic_dec(&po->tx_ring.pending); __packet_set_status(po, ph, TP_STATUS_AVAILABLE); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index f10fb8256442..05d60859d8e3 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, struct tcf_common *pc; int ret = 0; int err; +#ifdef CONFIG_GACT_PROB + struct tc_gact_p *p_parm = NULL; +#endif if (nla == NULL) return -EINVAL; @@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, #ifndef CONFIG_GACT_PROB if (tb[TCA_GACT_PROB] != NULL) return -EOPNOTSUPP; +#else + if (tb[TCA_GACT_PROB]) { + p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm->ptype >= MAX_RAND) + return -EINVAL; + } #endif pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info); @@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, spin_lock_bh(&gact->tcf_lock); gact->tcf_action = parm->action; #ifdef CONFIG_GACT_PROB - if (tb[TCA_GACT_PROB] != NULL) { - struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm) { gact->tcfg_paction = p_parm->paction; gact->tcfg_pval = p_parm->pval; gact->tcfg_ptype = p_parm->ptype; @@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a, spin_lock(&gact->tcf_lock); #ifdef CONFIG_GACT_PROB - if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL) + if (gact->tcfg_ptype) action = gact_rand[gact->tcfg_ptype](gact); else action = gact->tcf_action; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 60e281ad0f07..58fb3c7aab9e 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -185,7 +185,12 @@ err3: err2: kfree(tname); err1: - kfree(pc); + if (ret == ACT_P_CREATED) { + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); + } return err; } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fe81cc18e9e0..9c0fd0c78814 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, out: if (err) { m->tcf_qstats.overlimits++; - /* should we be asking for packet to be dropped? 
- * may make sense for redirect case only - */ - retval = TC_ACT_SHOT; - } else { + if (m->tcfm_eaction != TCA_EGRESS_MIRROR) + retval = TC_ACT_SHOT; + else + retval = m->tcf_action; + } else retval = m->tcf_action; - } spin_unlock(&m->tcf_lock); return retval; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 26aa2f6ce257..45c53ab067a6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, p = to_pedit(pc); keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { - kfree(pc); + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); return -ENOMEM; } ret = ACT_P_CREATED; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 3922f2a2821b..3714f60f0b3c 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, d = to_defact(pc); ret = alloc_defdata(d, defdata); if (ret < 0) { - kfree(pc); + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); return ret; } d->tcf_action = parm->action; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 9af01f3df18c..e4723d31fdd5 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -203,6 +203,34 @@ out: return index; } +/* Length of the next packet (0 if the queue is empty). */ +static unsigned int qdisc_peek_len(struct Qdisc *sch) +{ + struct sk_buff *skb; + + skb = sch->ops->peek(sch); + return skb ? qdisc_pkt_len(skb) : 0; +} + +static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *); +static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, + unsigned int len); + +static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl, + u32 lmax, u32 inv_w, int delta_w) +{ + int i; + + /* update qfq-specific data */ + cl->lmax = lmax; + cl->inv_w = inv_w; + i = qfq_calc_index(cl->inv_w, cl->lmax); + + cl->grp = &q->groups[i]; + + q->wsum += delta_w; +} + static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { @@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, lmax = 1UL << QFQ_MTU_SHIFT; if (cl != NULL) { + bool need_reactivation = false; + if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), @@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } - if (inv_w != cl->inv_w) { - sch_tree_lock(sch); - q->wsum += delta_w; - cl->inv_w = inv_w; - sch_tree_unlock(sch); + if (lmax == cl->lmax && inv_w == cl->inv_w) + return 0; /* nothing to update */ + + i = qfq_calc_index(inv_w, lmax); + sch_tree_lock(sch); + if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { + /* + * shift cl->F back, to not charge the + * class for the not-yet-served head + * packet + */ + cl->F = cl->S; + /* remove class from its slot in the old group */ + qfq_deactivate_class(q, cl); + need_reactivation = true; } + + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); + + if (need_reactivation) /* activate in new group */ + qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); + sch_tree_unlock(sch); + return 0; } @@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, cl->refcnt = 1; cl->common.classid = classid; - cl->lmax = lmax; - cl->inv_w = inv_w; - i = 
qfq_calc_index(cl->inv_w, cl->lmax); - cl->grp = &q->groups[i]; + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); @@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } } - q->wsum += weight; sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->common); @@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V) } } -/* What is length of next packet in queue (0 if queue is empty) */ -static unsigned int qdisc_peek_len(struct Qdisc *sch) -{ - struct sk_buff *skb; - - skb = sch->ops->peek(sch); - return skb ? qdisc_pkt_len(skb) : 0; -} - /* * Updates the class, returns true if also the group needs to be updated. */ @@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); - struct qfq_group *grp; struct qfq_class *cl; int err; - u64 roundedS; - int s; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { @@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) return err; /* If reach this point, queue q was idle */ - grp = cl->grp; + qfq_activate_class(q, cl, qdisc_pkt_len(skb)); + + return err; +} + +/* + * Handle class switch from idle to backlogged. + */ +static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, + unsigned int pkt_len) +{ + struct qfq_group *grp = cl->grp; + u64 roundedS; + int s; + qfq_update_start(q, cl); /* compute new finish time and rounded start. */ - cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w; + cl->F = cl->S + (u64)pkt_len * cl->inv_w; roundedS = qfq_round_down(cl->S, grp->slot_shift); /* @@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) skip_update: qfq_slot_insert(grp, cl, roundedS); - - return err; } diff --git a/net/socket.c b/net/socket.c index dfe5b66c97e0..a5471f804d99 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2657,6 +2657,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; + memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 79981d97bc9c..c5ee4ff61364 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -823,6 +823,34 @@ fail: return NULL; } +static int unix_mknod(const char *sun_path, umode_t mode, struct path *res) +{ + struct dentry *dentry; + struct path path; + int err = 0; + /* + * Get the parent directory, calculate the hash for last + * component. + */ + dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); + err = PTR_ERR(dentry); + if (IS_ERR(dentry)) + return err; + + /* + * All right, let's create it. 
+ */ + err = security_path_mknod(&path, dentry, mode, 0); + if (!err) { + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); + if (!err) { + res->mnt = mntget(path.mnt); + res->dentry = dget(dentry); + } + } + done_path_create(&path, dentry); + return err; +} static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { @@ -831,8 +859,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; char *sun_path = sunaddr->sun_path; - struct dentry *dentry = NULL; - struct path path; int err; unsigned int hash; struct unix_address *addr; @@ -869,43 +895,23 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) atomic_set(&addr->refcnt, 1); if (sun_path[0]) { - umode_t mode; - err = 0; - /* - * Get the parent directory, calculate the hash for last - * component. - */ - dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); - err = PTR_ERR(dentry); - if (IS_ERR(dentry)) - goto out_mknod_parent; - - /* - * All right, let's create it. - */ - mode = S_IFSOCK | + struct path path; + umode_t mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current_umask()); - err = mnt_want_write(path.mnt); - if (err) - goto out_mknod_dput; - err = security_path_mknod(&path, dentry, mode, 0); - if (err) - goto out_mknod_drop_write; - err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); -out_mknod_drop_write: - mnt_drop_write(path.mnt); - if (err) - goto out_mknod_dput; - mutex_unlock(&path.dentry->d_inode->i_mutex); - dput(path.dentry); - path.dentry = dentry; - + err = unix_mknod(sun_path, mode, &path); + if (err) { + if (err == -EEXIST) + err = -EADDRINUSE; + unix_release_addr(addr); + goto out_up; + } addr->hash = UNIX_HASH_SIZE; - } - - spin_lock(&unix_table_lock); - - if (!sun_path[0]) { + hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1); + spin_lock(&unix_table_lock); + u->path = path; + list = &unix_socket_table[hash]; + } else { + spin_lock(&unix_table_lock); err = -EADDRINUSE; if (__unix_find_socket_byname(net, sunaddr, addr_len, sk->sk_type, hash)) { @@ -914,9 +920,6 @@ out_mknod_drop_write: } list = &unix_socket_table[addr->hash]; - } else { - list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; - u->path = path; } err = 0; @@ -930,16 +933,6 @@ out_up: mutex_unlock(&u->readlock); out: return err; - -out_mknod_dput: - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); -out_mknod_parent: - if (err == -EEXIST) - err = -EADDRINUSE; - unix_release_addr(addr); - goto out_up; } static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) @@ -1457,7 +1450,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; @@ -1626,7 +1619,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; diff --git a/net/wireless/core.c b/net/wireless/core.c index 31b40cc4a9c3..dcd64d5b07aa 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, */ synchronize_rcu(); INIT_LIST_HEAD(&wdev->list); + /* + * Ensure 
that all events have been processed and + * freed. + */ + cfg80211_process_wdev_events(wdev); break; case NETDEV_PRE_UP: if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) diff --git a/net/wireless/core.h b/net/wireless/core.h index 5206c6844fd7..bc7430b54771 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, u32 *flags, struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); +void cfg80211_process_wdev_events(struct wireless_dev *wdev); int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2303ee73b50a..2ded3c7fad06 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_IBSS; if (rd_flags & NL80211_RRF_DFS) channel_flags |= IEEE80211_CHAN_RADAR; + if (rd_flags & NL80211_RRF_NO_OFDM) + channel_flags |= IEEE80211_CHAN_NO_OFDM; return channel_flags; } @@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy, chan->max_antenna_gain = min(chan->orig_mag, (int) MBI_TO_DBI(power_rule->max_antenna_gain)); chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); - chan->max_power = min(chan->max_power, chan->max_reg_power); + if (chan->orig_mpwr) { + /* + * Devices that have their own custom regulatory domain + * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the + * passed country IE power settings. + */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY && + wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) + chan->max_power = chan->max_reg_power; + else + chan->max_power = min(chan->orig_mpwr, + chan->max_reg_power); + } else + chan->max_power = chan->max_reg_power; } static void handle_band(struct wiphy *wiphy, @@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy) chan->flags = chan->orig_flags; chan->max_antenna_gain = chan->orig_mag; chan->max_power = chan->orig_mpwr; + chan->beacon_found = false; } } } diff --git a/net/wireless/util.c b/net/wireless/util.c index 26f8cd30f712..994e2f0cc7a8 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) wdev->connect_keys = NULL; } -static void cfg80211_process_wdev_events(struct wireless_dev *wdev) +void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index c5a5165a5927..5a2aa17e4d3c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1357,6 +1357,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); xdst->flo.ops = &xfrm_bundle_fc_ops; + if (afinfo->init_dst) + afinfo->init_dst(net, xdst); } else xdst = ERR_PTR(-ENOBUFS); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 5b228f97d4b3..87cd0e4d4282 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) if (x->lft.hard_add_expires_seconds) { long tmo = x->lft.hard_add_expires_seconds + x->curlft.add_time - now; - if (tmo <= 0) - goto expired; + if (tmo <= 0) { + if (x->xflags & XFRM_SOFT_EXPIRE) { + /* enter 
hard expire without soft expire first?! + * setting a new date could trigger this. + * workarbound: fix x->curflt.add_time by below: + */ + x->curlft.add_time = now - x->saved_tmo - 1; + tmo = x->lft.hard_add_expires_seconds - x->saved_tmo; + } else + goto expired; + } if (tmo < next) next = tmo; } @@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) if (x->lft.soft_add_expires_seconds) { long tmo = x->lft.soft_add_expires_seconds + x->curlft.add_time - now; - if (tmo <= 0) + if (tmo <= 0) { warn = 1; - else if (tmo < next) + x->xflags &= ~XFRM_SOFT_EXPIRE; + } else if (tmo < next) { next = tmo; + x->xflags |= XFRM_SOFT_EXPIRE; + x->saved_tmo = tmo; + } } if (x->lft.soft_use_expires_seconds) { long tmo = x->lft.soft_use_expires_seconds + diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 913d6bdfdda3..ca05ba217f5f 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3016,7 +3016,8 @@ sub process { $herectx .= raw_line($linenr, $n) . "\n"; } - if (($stmts =~ tr/;/;/) == 1) { + if (($stmts =~ tr/;/;/) == 1 && + $stmts !~ /^\s*(if|while|for|switch)\b/) { WARN("SINGLE_STATEMENT_DO_WHILE_MACRO", "Single statement macros should not use a do {} while (0) loop\n" . "$herectx"); } diff --git a/scripts/decodecode b/scripts/decodecode index 18ba881c3415..4f8248d5a11f 100755 --- a/scripts/decodecode +++ b/scripts/decodecode @@ -89,7 +89,7 @@ echo $code >> $T.s disas $T cat $T.dis >> $T.aa -faultline=`cat $T.dis | head -1 | cut -d":" -f2` +faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g" diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 9b0c0b8b4ab4..8fd107a3fac4 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1786,6 +1786,7 @@ sub dump_function($$) { $prototype =~ s/__init +//; $prototype =~ s/__init_or_module +//; $prototype =~ s/__must_check +//; + $prototype =~ s/__weak +//; $prototype =~ s/^#\s*define\s+//; #ak added $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 83554ee8a587..0cc99a3ea42d 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -279,12 +279,46 @@ static int yama_ptrace_access_check(struct task_struct *child, } if (rc) { - char name[sizeof(current->comm)]; printk_ratelimited(KERN_NOTICE "ptrace of pid %d was attempted by: %s (pid %d)\n", - child->pid, - get_task_comm(name, current), - current->pid); + child->pid, current->comm, current->pid); + } + + return rc; +} + +/** + * yama_ptrace_traceme - validate PTRACE_TRACEME calls + * @parent: task that will become the ptracer of the current task + * + * Returns 0 if following the ptrace is allowed, -ve on error. + */ +static int yama_ptrace_traceme(struct task_struct *parent) +{ + int rc; + + /* If standard caps disallows it, so does Yama. We should + * only tighten restrictions further. + */ + rc = cap_ptrace_traceme(parent); + if (rc) + return rc; + + /* Only disallow PTRACE_TRACEME on more aggressive settings. 
*/ + switch (ptrace_scope) { + case YAMA_SCOPE_CAPABILITY: + if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE)) + rc = -EPERM; + break; + case YAMA_SCOPE_NO_ATTACH: + rc = -EPERM; + break; + } + + if (rc) { + printk_ratelimited(KERN_NOTICE + "ptraceme of pid %d was attempted by: %s (pid %d)\n", + current->pid, parent->comm, parent->pid); } return rc; @@ -294,6 +328,7 @@ static struct security_operations yama_ops = { .name = "yama", .ptrace_access_check = yama_ptrace_access_check, + .ptrace_traceme = yama_ptrace_traceme, .task_prctl = yama_task_prctl, .task_free = yama_task_free, }; diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c index 0d7b25e81643..4e1fda75c1c9 100644 --- a/sound/arm/pxa2xx-ac97.c +++ b/sound/arm/pxa2xx-ac97.c @@ -106,7 +106,7 @@ static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = { .prepare = pxa2xx_ac97_pcm_prepare, }; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int pxa2xx_ac97_do_suspend(struct snd_card *card) { @@ -243,7 +243,7 @@ static struct platform_driver pxa2xx_ac97_driver = { .driver = { .name = "pxa2xx-ac97", .owner = THIS_MODULE, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .pm = &pxa2xx_ac97_pm_ops, #endif }, diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index eb4ceb71123e..277ebce23a45 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c @@ -452,6 +452,7 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev) dac->regs = ioremap(regs->start, resource_size(regs)); if (!dac->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); + retval = -ENOMEM; goto out_free_card; } @@ -534,7 +535,7 @@ out_put_pclk: return retval; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int atmel_abdac_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index bf47025bdf45..9052aff37f64 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c @@ -278,14 +278,9 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream, if (retval < 0) return retval; /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ - if (cpu_is_at32ap7000()) { - if (retval < 0) - return retval; - /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ - if (retval == 1) - if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) - dw_dma_cyclic_free(chip->dma.rx_chan); - } + if (cpu_is_at32ap7000() && retval == 1) + if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) + dw_dma_cyclic_free(chip->dma.rx_chan); /* Set restrictions to params. 
*/ mutex_lock(&opened_mutex); @@ -980,6 +975,7 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev) if (!chip->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); + retval = -ENOMEM; goto err_ioremap; } @@ -1134,7 +1130,7 @@ err_snd_card_new: return retval; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int atmel_ac97c_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c index 4e7ec2b49873..d0f00356fc11 100644 --- a/sound/core/sgbuf.c +++ b/sound/core/sgbuf.c @@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device, if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device, chunk, &tmpb) < 0) { if (!sgbuf->pages) - return NULL; + goto _failed; if (!res_size) goto _failed; size = sgbuf->pages * PAGE_SIZE; diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 1128b35b2b05..5a34355e78e8 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -1176,7 +1176,7 @@ static int __devexit loopback_remove(struct platform_device *devptr) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int loopback_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index f7d3bfc6bca8..54bb6644a598 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c @@ -1064,7 +1064,7 @@ static int __devexit snd_dummy_remove(struct platform_device *devptr) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int snd_dummy_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c index 1cff331a228e..4608c2ca43f8 100644 --- a/sound/drivers/mpu401/mpu401_uart.c +++ b/sound/drivers/mpu401/mpu401_uart.c @@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device, spin_lock_init(&mpu->output_lock); spin_lock_init(&mpu->timer_lock); mpu->hardware = hardware; + mpu->irq = -1; if (! (info_flags & MPU401_INFO_INTEGRATED)) { int res_size = hardware == MPU401_HW_PC98II ? 
4 : 2; mpu->res = request_region(port, res_size, "MPU401 UART"); diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c index 6ca59fc6dcb9..ef171295f6d4 100644 --- a/sound/drivers/pcsp/pcsp.c +++ b/sound/drivers/pcsp/pcsp.c @@ -199,7 +199,7 @@ static void pcsp_stop_beep(struct snd_pcsp *chip) pcspkr_stop_sound(); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int pcsp_suspend(struct device *dev) { struct snd_pcsp *chip = dev_get_drvdata(dev); @@ -212,7 +212,7 @@ static SIMPLE_DEV_PM_OPS(pcsp_pm, pcsp_suspend, NULL); #define PCSP_PM_OPS &pcsp_pm #else #define PCSP_PM_OPS NULL -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static void pcsp_shutdown(struct platform_device *dev) { diff --git a/sound/isa/als100.c b/sound/isa/als100.c index 2d67c78c9f4b..f7cdaf51512d 100644 --- a/sound/isa/als100.c +++ b/sound/isa/als100.c @@ -233,7 +233,7 @@ static int __devinit snd_card_als100_probe(int dev, irq[dev], dma8[dev], dma16[dev]); } - if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) { + if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) { snd_card_free(card); return error; } diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c index 1d47be8170b5..b3b4f15e45ba 100644 --- a/sound/isa/es1688/es1688_lib.c +++ b/sound/isa/es1688/es1688_lib.c @@ -612,10 +612,10 @@ static int snd_es1688_capture_close(struct snd_pcm_substream *substream) static int snd_es1688_free(struct snd_es1688 *chip) { - if (chip->res_port) { + if (chip->hardware != ES1688_HW_UNDEF) snd_es1688_init(chip, 0); + if (chip->res_port) release_and_free_resource(chip->res_port); - } if (chip->irq >= 0) free_irq(chip->irq, (void *) chip); if (chip->dma8 >= 0) { @@ -657,19 +657,27 @@ int snd_es1688_create(struct snd_card *card, return -ENOMEM; chip->irq = -1; chip->dma8 = -1; + chip->hardware = ES1688_HW_UNDEF; - if ((chip->res_port = request_region(port + 4, 12, "ES1688")) == NULL) { + chip->res_port = request_region(port + 4, 12, "ES1688"); + if (chip->res_port == NULL) { snd_printk(KERN_ERR "es1688: can't grab port 0x%lx\n", port + 4); - return -EBUSY; + err = -EBUSY; + goto exit; } - if (request_irq(irq, snd_es1688_interrupt, 0, "ES1688", (void *) chip)) { + + err = request_irq(irq, snd_es1688_interrupt, 0, "ES1688", (void *) chip); + if (err < 0) { snd_printk(KERN_ERR "es1688: can't grab IRQ %d\n", irq); - return -EBUSY; + goto exit; } + chip->irq = irq; - if (request_dma(dma8, "ES1688")) { + err = request_dma(dma8, "ES1688"); + + if (err < 0) { snd_printk(KERN_ERR "es1688: can't grab DMA8 %d\n", dma8); - return -EBUSY; + goto exit; } chip->dma8 = dma8; @@ -685,14 +693,18 @@ int snd_es1688_create(struct snd_card *card, err = snd_es1688_probe(chip); if (err < 0) - return err; + goto exit; err = snd_es1688_init(chip, 1); if (err < 0) - return err; + goto exit; /* Register device */ - return snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); +exit: + if (err) + snd_es1688_free(chip); + return err; } static struct snd_pcm_ops snd_es1688_playback_ops = { diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c index 733b014ec7d1..b2b3c014221a 100644 --- a/sound/oss/sb_audio.c +++ b/sound/oss/sb_audio.c @@ -575,13 +575,15 @@ static int jazz16_audio_set_speed(int dev, int speed) if (speed > 0) { int tmp; - int s = speed * devc->channels; + int s; if (speed < 5000) speed = 5000; if (speed > 44100) speed = 44100; + s = speed * devc->channels; + devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff; tmp = 256 - devc->tconst; diff --git 
a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c index f75f5ffdfdfb..a71d1c14a0f6 100644 --- a/sound/pci/cs46xx/cs46xx_lib.c +++ b/sound/pci/cs46xx/cs46xx_lib.c @@ -94,7 +94,7 @@ static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip, if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) - return -EINVAL; + return 0xffff; chip->active_ctrl(chip, 1); diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index 8e40262d4117..2f6e9c762d3f 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -1725,8 +1725,10 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci, atc_connect_resources(atc); atc->timer = ct_timer_new(atc); - if (!atc->timer) + if (!atc->timer) { + err = -ENOMEM; goto error1; + } err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops); if (err < 0) diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index 4f502a2bdc3c..0a436626182b 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c @@ -326,7 +326,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst for (page = blk->first_page; page <= blk->last_page; page++, idx++) { unsigned long ofs = idx << PAGE_SHIFT; dma_addr_t addr; - addr = snd_pcm_sgbuf_get_addr(substream, ofs); + if (ofs >= runtime->dma_bytes) + addr = emu->silent_page.addr; + else + addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! is_valid_page(emu, addr)) { printk(KERN_ERR "emu: failure page = %d\n", idx); mutex_unlock(&hdr->block_mutex); diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 647218d69f68..4f7d2dfcef7b 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -332,13 +332,12 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec, if (cfg->dig_outs) snd_printd(" dig-out=0x%x/0x%x\n", cfg->dig_out_pins[0], cfg->dig_out_pins[1]); - snd_printd(" inputs:"); + snd_printd(" inputs:\n"); for (i = 0; i < cfg->num_inputs; i++) { - snd_printd(" %s=0x%x", + snd_printd(" %s=0x%x\n", hda_get_autocfg_input_label(codec, cfg, i), cfg->inputs[i].pin); } - snd_printd("\n"); if (cfg->dig_in_pin) snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin); diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index 0bc2315b181d..0849aac449f2 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -231,16 +231,22 @@ void snd_hda_detach_beep_device(struct hda_codec *codec) } EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device); +static bool ctl_has_mute(struct snd_kcontrol *kcontrol) +{ + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); + return query_amp_caps(codec, get_amp_nid(kcontrol), + get_amp_direction(kcontrol)) & AC_AMPCAP_MUTE; +} + /* get/put callbacks for beep mute mixer switches */ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_beep *beep = codec->beep; - if (beep) { + if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) { ucontrol->value.integer.value[0] = - ucontrol->value.integer.value[1] = - beep->enabled; + ucontrol->value.integer.value[1] = beep->enabled; return 0; } return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol); @@ -252,9 +258,20 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol, { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_beep *beep = codec->beep; - if (beep) - snd_hda_enable_beep_device(codec, - 
*ucontrol->value.integer.value); + if (beep) { + u8 chs = get_amp_channels(kcontrol); + int enable = 0; + long *valp = ucontrol->value.integer.value; + if (chs & 1) { + enable |= *valp; + valp++; + } + if (chs & 2) + enable |= *valp; + snd_hda_enable_beep_device(codec, enable); + } + if (!ctl_has_mute(kcontrol)) + return 0; return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol); } EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 88a9c20eb7a2..f560051a949e 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1386,6 +1386,44 @@ int snd_hda_codec_configure(struct hda_codec *codec) } EXPORT_SYMBOL_HDA(snd_hda_codec_configure); +/* update the stream-id if changed */ +static void update_pcm_stream_id(struct hda_codec *codec, + struct hda_cvt_setup *p, hda_nid_t nid, + u32 stream_tag, int channel_id) +{ + unsigned int oldval, newval; + + if (p->stream_tag != stream_tag || p->channel_id != channel_id) { + oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); + newval = (stream_tag << 4) | channel_id; + if (oldval != newval) + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_CHANNEL_STREAMID, + newval); + p->stream_tag = stream_tag; + p->channel_id = channel_id; + } +} + +/* update the format-id if changed */ +static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p, + hda_nid_t nid, int format) +{ + unsigned int oldval; + + if (p->format_id != format) { + oldval = snd_hda_codec_read(codec, nid, 0, + AC_VERB_GET_STREAM_FORMAT, 0); + if (oldval != format) { + msleep(1); + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_STREAM_FORMAT, + format); + } + p->format_id = format; + } +} + /** * snd_hda_codec_setup_stream - set up the codec for streaming * @codec: the CODEC to set up @@ -1400,7 +1438,6 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, { struct hda_codec *c; struct hda_cvt_setup *p; - unsigned int oldval, newval; int type; int i; @@ -1413,29 +1450,13 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, p = get_hda_cvt_setup(codec, nid); if (!p) return; - /* update the stream-id if changed */ - if (p->stream_tag != stream_tag || p->channel_id != channel_id) { - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); - newval = (stream_tag << 4) | channel_id; - if (oldval != newval) - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_CHANNEL_STREAMID, - newval); - p->stream_tag = stream_tag; - p->channel_id = channel_id; - } - /* update the format-id if changed */ - if (p->format_id != format) { - oldval = snd_hda_codec_read(codec, nid, 0, - AC_VERB_GET_STREAM_FORMAT, 0); - if (oldval != format) { - msleep(1); - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_STREAM_FORMAT, - format); - } - p->format_id = format; - } + + if (codec->pcm_format_first) + update_pcm_format(codec, p, nid, format); + update_pcm_stream_id(codec, p, nid, stream_tag, channel_id); + if (!codec->pcm_format_first) + update_pcm_format(codec, p, nid, format); + p->active = 1; p->dirty = 0; @@ -3497,7 +3518,7 @@ static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg { int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE); - if (sup < 0) + if (sup == -1) return false; if (sup & power_state) return true; @@ -4433,6 +4454,8 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down) * then there is no need to go through power up here. 
*/ if (codec->power_on) { + if (codec->power_transition < 0) + codec->power_transition = 0; spin_unlock(&codec->power_lock); return; } diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index c422d330ca54..7fbc1bcaf1a9 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -861,6 +861,7 @@ struct hda_codec { unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ unsigned int no_jack_detect:1; /* Machine has no jack-detection */ + unsigned int pcm_format_first:1; /* PCM format must be set first */ #ifdef CONFIG_SND_HDA_POWER_SAVE unsigned int power_on :1; /* current (global) power-state */ int power_transition; /* power-state in transition */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c8aced182fd1..60882c62f180 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -151,6 +151,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," "{Intel, CPT}," "{Intel, PPT}," "{Intel, LPT}," + "{Intel, LPT_LP}," "{Intel, HPT}," "{Intel, PBG}," "{Intel, SCH}," @@ -3270,6 +3271,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { { PCI_DEVICE(0x8086, 0x8c20), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, + /* Lynx Point-LP */ + { PCI_DEVICE(0x8086, 0x9c20), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | + AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, + /* Lynx Point-LP */ + { PCI_DEVICE(0x8086, 0x9c21), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | + AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0c0c), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index 7e46258fc700..6894ec66258c 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c @@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer, if (digi1 & AC_DIG1_EMPHASIS) snd_iprintf(buffer, " Preemphasis"); if (digi1 & AC_DIG1_COPYRIGHT) - snd_iprintf(buffer, " Copyright"); + snd_iprintf(buffer, " Non-Copyright"); if (digi1 & AC_DIG1_NONAUDIO) snd_iprintf(buffer, " Non-Audio"); if (digi1 & AC_DIG1_PROFESSIONAL) diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index d0d3540e39e7..49750a96d649 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -246,7 +246,7 @@ static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac) AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); } - if (dac) + if (dac && (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP)) snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO); } @@ -261,7 +261,7 @@ static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc) AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } - if (adc) + if (adc && (get_wcaps(codec, adc) & AC_WCAP_IN_AMP)) snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } @@ -275,6 +275,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int type = dir ? 
HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); + if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) { + snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid); + return 0; + } sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } @@ -286,6 +290,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); + if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) { + snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid); + return 0; + } sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } @@ -464,50 +472,17 @@ exit: } /* - * PCM stuffs + * PCM callbacks */ -static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, - u32 stream_tag, - int channel_id, int format) +static int ca0132_playback_pcm_open(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) { - unsigned int oldval, newval; - - if (!nid) - return; - - snd_printdd("ca0132_setup_stream: " - "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n", - nid, stream_tag, channel_id, format); - - /* update the format-id if changed */ - oldval = snd_hda_codec_read(codec, nid, 0, - AC_VERB_GET_STREAM_FORMAT, - 0); - if (oldval != format) { - msleep(20); - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_STREAM_FORMAT, - format); - } - - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); - newval = (stream_tag << 4) | channel_id; - if (oldval != newval) { - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_CHANNEL_STREAMID, - newval); - } -} - -static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid) -{ - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0); - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0); + struct ca0132_spec *spec = codec->spec; + return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream, + hinfo); } -/* - * PCM callbacks - */ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, @@ -515,10 +490,8 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); - - return 0; + return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, + stream_tag, format, substream); } static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, @@ -526,92 +499,45 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->dacs[0]); - - return 0; + return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); } /* * Digital out */ -static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - unsigned int stream_tag, - unsigned int format, - struct snd_pcm_substream *substream) +static int ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->dig_out, 
stream_tag, 0, format); - - return 0; + return snd_hda_multi_out_dig_open(codec, &spec->multiout); } -static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - struct snd_pcm_substream *substream) -{ - struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->dig_out); - - return 0; -} - -/* - * Analog capture - */ -static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo, +static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->adcs[substream->number], - stream_tag, 0, format); - - return 0; + return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, + stream_tag, format, substream); } -static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, +static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->adcs[substream->number]); - - return 0; + return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout); } -/* - * Digital capture - */ -static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - unsigned int stream_tag, - unsigned int format, - struct snd_pcm_substream *substream) +static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format); - - return 0; -} - -static int ca0132_dig_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - struct snd_pcm_substream *substream) -{ - struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->dig_in); - - return 0; + return snd_hda_multi_out_dig_close(codec, &spec->multiout); } /* @@ -621,6 +547,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = { .channels_min = 2, .channels_max = 2, .ops = { + .open = ca0132_playback_pcm_open, .prepare = ca0132_playback_pcm_prepare, .cleanup = ca0132_playback_pcm_cleanup }, @@ -630,10 +557,6 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, - .ops = { - .prepare = ca0132_capture_pcm_prepare, - .cleanup = ca0132_capture_pcm_cleanup - }, }; static struct hda_pcm_stream ca0132_pcm_digital_playback = { @@ -641,6 +564,8 @@ static struct hda_pcm_stream ca0132_pcm_digital_playback = { .channels_min = 2, .channels_max = 2, .ops = { + .open = ca0132_dig_playback_pcm_open, + .close = ca0132_dig_playback_pcm_close, .prepare = ca0132_dig_playback_pcm_prepare, .cleanup = ca0132_dig_playback_pcm_cleanup }, @@ -650,10 +575,6 @@ static struct hda_pcm_stream ca0132_pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, - .ops = { - .prepare = ca0132_dig_capture_pcm_prepare, - .cleanup = ca0132_dig_capture_pcm_cleanup - }, }; static int ca0132_build_pcms(struct hda_codec *codec) @@ -928,18 +849,16 @@ static int ca0132_build_controls(struct hda_codec *codec) spec->dig_out); if (err < 0) return err; - err = add_out_volume(codec, spec->dig_out, "IEC958"); + err = snd_hda_create_spdif_share_sw(codec, &spec->multiout); if (err < 0) return err; + /* spec->multiout.share_spdif = 1; */ } if (spec->dig_in) { err = 
snd_hda_create_spdif_in_ctls(codec, spec->dig_in); if (err < 0) return err; - err = add_in_volume(codec, spec->dig_in, "IEC958"); - if (err < 0) - return err; } return 0; } @@ -961,6 +880,9 @@ static void ca0132_config(struct hda_codec *codec) struct ca0132_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; + codec->pcm_format_first = 1; + codec->no_sticky_stream = 1; + /* line-outs */ cfg->line_outs = 1; cfg->line_out_pins[0] = 0x0b; /* front */ @@ -988,14 +910,24 @@ static void ca0132_config(struct hda_codec *codec) /* Mic-in */ spec->input_pins[0] = 0x12; - spec->input_labels[0] = "Mic-In"; + spec->input_labels[0] = "Mic"; spec->adcs[0] = 0x07; /* Line-In */ spec->input_pins[1] = 0x11; - spec->input_labels[1] = "Line-In"; + spec->input_labels[1] = "Line"; spec->adcs[1] = 0x08; spec->num_inputs = 2; + + /* SPDIF I/O */ + spec->dig_out = 0x05; + spec->multiout.dig_out_nid = spec->dig_out; + cfg->dig_out_pins[0] = 0x0c; + cfg->dig_outs = 1; + cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; + spec->dig_in = 0x09; + cfg->dig_in_pin = 0x0e; + cfg->dig_in_type = HDA_PCM_TYPE_SPDIF; } static void ca0132_init_chip(struct hda_codec *codec) diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 14361184ae1e..5e22a8f43d2e 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -2967,12 +2967,10 @@ static const char * const cxt5066_models[CXT5066_MODELS] = { }; static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { - SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO), SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), - SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), @@ -2988,14 +2986,10 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO), - SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO), - SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO), {} }; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 641408dc28c0..8f23374fa642 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1165,14 +1165,20 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { + snd_hda_codec_cleanup_stream(codec, hinfo->nid); + return 0; +} + +static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) +{ struct hdmi_spec *spec = codec->spec; int cvt_idx, pin_idx; struct hdmi_spec_per_cvt 
*per_cvt; struct hdmi_spec_per_pin *per_pin; int pinctl; - snd_hda_codec_cleanup_stream(codec, hinfo->nid); - if (hinfo->nid) { cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid); if (snd_BUG_ON(cvt_idx < 0)) @@ -1195,12 +1201,12 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, pinctl & ~PIN_OUT); snd_hda_spdif_ctls_unassign(codec, pin_idx); } - return 0; } static const struct hda_pcm_ops generic_ops = { .open = hdmi_pcm_open, + .close = hdmi_pcm_close, .prepare = generic_hdmi_playback_pcm_prepare, .cleanup = generic_hdmi_playback_pcm_cleanup, }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f141395dfee6..4f81dd44c837 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -203,6 +203,7 @@ struct alc_spec { unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */ unsigned int inv_dmic_fixup:1; /* has inverted digital-mic workaround */ unsigned int inv_dmic_muted:1; /* R-ch of inv d-mic is muted? */ + unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */ /* auto-mute control */ int automute_mode; @@ -4323,7 +4324,8 @@ static int alc_parse_auto_config(struct hda_codec *codec, return 0; /* can't find valid BIOS pin config */ } - if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT && + if (!spec->no_primary_hp && + cfg->line_out_type == AUTO_PIN_SPEAKER_OUT && cfg->line_outs <= cfg->hp_outs) { /* use HP as primary out */ cfg->speaker_outs = cfg->line_outs; @@ -5050,6 +5052,7 @@ enum { ALC889_FIXUP_MBP_VREF, ALC889_FIXUP_IMAC91_VREF, ALC882_FIXUP_INV_DMIC, + ALC882_FIXUP_NO_PRIMARY_HP, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -5171,6 +5174,17 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec, spec->keep_vref_in_automute = 1; } +/* Don't take HP output as primary + * strangely, the speaker output doesn't work on VAIO Z through DAC 0x05 + */ +static void alc882_fixup_no_primary_hp(struct hda_codec *codec, + const struct alc_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + if (action == ALC_FIXUP_ACT_PRE_PROBE) + spec->no_primary_hp = 1; +} + static const struct alc_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { .type = ALC_FIXUP_PINS, @@ -5357,6 +5371,10 @@ static const struct alc_fixup alc882_fixups[] = { .type = ALC_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic_0x12, }, + [ALC882_FIXUP_NO_PRIMARY_HP] = { + .type = ALC_FIXUP_FUNC, + .v.func = alc882_fixup_no_primary_hp, + }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -5391,6 +5409,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), /* All Apple entries are in codec SSIDs */ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), @@ -5432,6 +5451,7 @@ static const struct alc_model_fixup alc882_fixup_models[] = { {.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"}, {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"}, {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"}, + {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, {} }; @@ -6079,6 +6099,8 @@ static const struct alc_fixup alc269_fixups[] = { [ALC269_FIXUP_PCM_44K] = { .type = ALC_FIXUP_FUNC, .v.func = alc269_fixup_pcm_44k, + .chained = true, + .chain_id = ALC269_FIXUP_QUANTA_MUTE }, 
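The ALC269_FIXUP_PCM_44K entry above now chains to ALC269_FIXUP_QUANTA_MUTE, which is what lets the two duplicate Quanta FL1 quirk entries further down collapse into one: the fixup walker follows chain_id links, so a single SSID match applies both fixups. A minimal sketch of that chaining, with simplified, hypothetical names:

#include <stdio.h>
#include <stdbool.h>

enum { FIX_QUANTA_MUTE, FIX_PCM_44K };

struct fixup {
	const char *name;
	bool chained;
	int chain_id;			/* next fixup to apply when chained */
};

static const struct fixup fixups[] = {
	[FIX_QUANTA_MUTE] = { .name = "quanta-mute" },
	[FIX_PCM_44K]     = { .name = "pcm-44k", .chained = true,
			      .chain_id = FIX_QUANTA_MUTE },
};

static void apply_fixup(int id)
{
	for (;;) {
		printf("applying fixup: %s\n", fixups[id].name);
		if (!fixups[id].chained)
			break;
		id = fixups[id].chain_id;
	}
}

int main(void)
{
	apply_fixup(FIX_PCM_44K);	/* prints pcm-44k, then quanta-mute */
	return 0;
}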
[ALC269_FIXUP_STEREO_DMIC] = { .type = ALC_FIXUP_FUNC, @@ -6186,9 +6208,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K), + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), #if 0 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index a1596a3b171c..ea5775a1a7db 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -101,6 +101,8 @@ enum { STAC_92HD83XXX_HP_cNB11_INTQUAD, STAC_HP_DV7_4000, STAC_HP_ZEPHYR, + STAC_92HD83XXX_HP_LED, + STAC_92HD83XXX_HP_INV_LED, STAC_92HD83XXX_MODELS }; @@ -1675,6 +1677,8 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad", [STAC_HP_DV7_4000] = "hp-dv7-4000", [STAC_HP_ZEPHYR] = "hp-zephyr", + [STAC_92HD83XXX_HP_LED] = "hp-led", + [STAC_92HD83XXX_HP_INV_LED] = "hp-inv-led", }; static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { @@ -1729,6 +1733,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561, "HP", STAC_HP_ZEPHYR), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660, + "HP Mini", STAC_92HD83XXX_HP_LED), {} /* terminator */ }; @@ -4266,7 +4272,8 @@ static int stac92xx_init(struct hda_codec *codec) unsigned int gpio; int i; - snd_hda_sequence_write(codec, spec->init); + if (spec->init) + snd_hda_sequence_write(codec, spec->init); /* power down adcs initially */ if (spec->powerdown_adcs) @@ -4414,7 +4421,12 @@ static int stac92xx_init(struct hda_codec *codec) snd_hda_jack_report_sync(codec); /* sync mute LED */ - snd_hda_sync_vmaster_hook(&spec->vmaster_mute); + if (spec->gpio_led) { + if (spec->vmaster_mute.hook) + snd_hda_sync_vmaster_hook(&spec->vmaster_mute); + else /* the very first init call doesn't have vmaster yet */ + stac92xx_update_led_status(codec, false); + } /* sync the power-map */ if (spec->num_pwrs) @@ -5507,6 +5519,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec) static int patch_stac92hd83xxx(struct hda_codec *codec) { struct sigmatel_spec *spec; + int default_polarity = -1; /* no default cfg */ int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); @@ -5555,9 +5568,15 @@ again: case STAC_HP_ZEPHYR: spec->init = stac92hd83xxx_hp_zephyr_init; break; + case STAC_92HD83XXX_HP_LED: + default_polarity = 0; + break; + case STAC_92HD83XXX_HP_INV_LED: + default_polarity = 1; + break; } - if (find_mute_led_cfg(codec, -1/*no default cfg*/)) + if (find_mute_led_cfg(codec, default_polarity)) snd_printd("mute LED gpio %d polarity %d\n", spec->gpio_led, spec->gpio_led_polarity); @@ -5730,7 +5749,6 @@ again: /* fallthru */ case 0x111d76b4: /* 6 Port without Analog Mixer */ case 0x111d76b5: - spec->init = stac92hd71bxx_core_init; codec->slave_dig_outs 
= stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, @@ -5755,7 +5773,6 @@ again: spec->stream_delay = 40; /* 40 milliseconds */ /* disable VSW */ - spec->init = stac92hd71bxx_core_init; unmute_init++; snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0); snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); @@ -5770,7 +5787,6 @@ again: /* fallthru */ default: - spec->init = stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, @@ -5778,6 +5794,9 @@ again: break; } + if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB) + spec->init = stac92hd71bxx_core_init; + if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) snd_hda_sequence_write_cache(codec, unmute_init); diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 90645560ed39..430771776915 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -1752,6 +1752,14 @@ static int via_suspend(struct hda_codec *codec) { struct via_spec *spec = codec->spec; vt1708_stop_hp_work(spec); + + if (spec->codec_type == VT1802) { + /* Fix pop noise on headphones */ + int i; + for (i = 0; i < spec->autocfg.hp_outs; i++) + snd_hda_set_pin_ctl(codec, spec->autocfg.hp_pins[i], 0); + } + return 0; } #endif @@ -3226,7 +3234,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec) { struct via_spec *spec = codec->spec; int imux_is_smixer; - unsigned int parm; + unsigned int parm, parm2; /* MUX6 (1eh) = stereo mixer */ imux_is_smixer = snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5; @@ -3249,7 +3257,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec) parm = AC_PWRST_D3; set_pin_power_state(codec, 0x27, &parm); update_power_state(codec, 0x1a, parm); - update_power_state(codec, 0xb, parm); + parm2 = parm; /* for pin 0x0b */ /* PW2 (26h), AOW2 (ah) */ parm = AC_PWRST_D3; @@ -3264,6 +3272,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec) if (!spec->hp_independent_mode) /* check for redirected HP */ set_pin_power_state(codec, 0x28, &parm); update_power_state(codec, 0x8, parm); + if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3) + parm = parm2; + update_power_state(codec, 0xb, parm); /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */ update_power_state(codec, 0x21, imux_is_smixer ? 
AC_PWRST_D0 : parm); diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c index d1ab43706735..5579b08bb35b 100644 --- a/sound/pci/lx6464es/lx6464es.c +++ b/sound/pci/lx6464es/lx6464es.c @@ -851,6 +851,8 @@ static int __devinit lx_pcm_create(struct lx6464es *chip) /* hardcoded device name & channel count */ err = snd_pcm_new(chip->card, (char *)card_name, 0, 1, 1, &pcm); + if (err < 0) + return err; pcm->private_data = chip; diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index b8ac8710f47f..b12308b5ba2a 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -6585,7 +6585,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card, snd_printk(KERN_ERR "HDSPM: " "unable to kmalloc Mixer memory of %d Bytes\n", (int)sizeof(struct hdspm_mixer)); - return err; + return -ENOMEM; } hdspm->port_names_in = NULL; diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c index 512434efcc31..805ab6e9a78f 100644 --- a/sound/pci/sis7019.c +++ b/sound/pci/sis7019.c @@ -1377,8 +1377,9 @@ static int __devinit sis_chip_create(struct snd_card *card, if (rc) goto error_out_cleanup; - if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, - sis)) { + rc = request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, + sis); + if (rc) { dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq); goto error_out_cleanup; } diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c index f5ceb6f282de..210cafe04890 100644 --- a/sound/ppc/powermac.c +++ b/sound/ppc/powermac.c @@ -143,7 +143,7 @@ static int __devexit snd_pmac_remove(struct platform_device *devptr) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int snd_pmac_driver_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index 1aa52eff526a..9b18b5243a56 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -1040,6 +1040,7 @@ static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev) GFP_KERNEL); if (!the_card.null_buffer_start_vaddr) { pr_info("%s: nullbuffer alloc failed\n", __func__); + ret = -ENOMEM; goto clean_preallocate; } pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__, diff --git a/sound/soc/blackfin/bf6xx-sport.c b/sound/soc/blackfin/bf6xx-sport.c index 318c5ba5360f..dfb744381c42 100644 --- a/sound/soc/blackfin/bf6xx-sport.c +++ b/sound/soc/blackfin/bf6xx-sport.c @@ -413,7 +413,14 @@ EXPORT_SYMBOL(sport_create); void sport_delete(struct sport_device *sport) { + if (sport->tx_desc) + dma_free_coherent(NULL, sport->tx_desc_size, + sport->tx_desc, 0); + if (sport->rx_desc) + dma_free_coherent(NULL, sport->rx_desc_size, + sport->rx_desc, 0); sport_free_resource(sport); + kfree(sport); } EXPORT_SYMBOL(sport_delete); diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c index 3c795921c5f6..23b40186f9b8 100644 --- a/sound/soc/codecs/ab8500-codec.c +++ b/sound/soc/codecs/ab8500-codec.c @@ -2406,6 +2406,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec) /* Setup AB8500 according to board-settings */ pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent); + + /* Inform SoC Core that we have our own I/O arrangements. 
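Several of the hunks above (ctatc.c, lx6464es.c, hdspm.c, sis7019.c, snd_ps3.c) fix the same class of bug: a helper's failure either was not checked at all or its error code was never captured in the variable the cleanup path returns, so callers saw 0 or a stale value. A minimal sketch of the corrected pattern, using hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int make_buffer(void **buf)
{
	*buf = malloc(4096);
	return *buf ? 0 : -ENOMEM;
}

static int probe(void)
{
	void *buf = NULL;
	int err;

	err = make_buffer(&buf);
	if (err)		/* check the call and capture its error code */
		goto error;

	/* ... further setup; any failure sets err and jumps to error ... */

	free(buf);
	return 0;

error:
	free(buf);		/* free(NULL) is a no-op, so cleanup stays simple */
	return err;		/* propagate the real reason, not 0 or a stale value */
}

int main(void)
{
	printf("probe() returned %d\n", probe());
	return 0;
}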
*/ + codec->control_data = (void *)true; + status = ab8500_audio_setup_mics(codec, &pdata->codec->amics); if (status < 0) { pr_err("%s: Failed to setup mics (%d)!\n", __func__, status); diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c index 8c39dddd7d00..11b1b714b8b5 100644 --- a/sound/soc/codecs/ad1980.c +++ b/sound/soc/codecs/ad1980.c @@ -186,6 +186,7 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec) printk(KERN_INFO "AD1980 SoC Audio Codec\n"); + codec->control_data = codec; /* we don't use regmap! */ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "ad1980: failed to register AC97 codec\n"); diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c index 6276e352125f..8f726c063f42 100644 --- a/sound/soc/codecs/mc13783.c +++ b/sound/soc/codecs/mc13783.c @@ -581,6 +581,8 @@ static int mc13783_probe(struct snd_soc_codec *codec) { struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec); + codec->control_data = priv->mc13xxx; + mc13xxx_lock(priv->mc13xxx); /* these are the reset values */ diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 8af6a5245b18..df2f99d1d428 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -239,6 +239,7 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = { {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */ {"LO", NULL, "DAC"}, /* dac --> line_out */ + {"LINE_IN", NULL, "VAG_POWER"}, {"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */ {"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */ @@ -1357,8 +1358,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) if (ret) goto err; - snd_soc_dapm_new_widgets(&codec->dapm); - return 0; err: diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c index 982e437799a8..33c0f3d39c87 100644 --- a/sound/soc/codecs/stac9766.c +++ b/sound/soc/codecs/stac9766.c @@ -340,6 +340,7 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec) printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION); + codec->control_data = codec; /* we don't use regmap! 
*/ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) goto codec_err; diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index 6537f16d383e..e33d327396ad 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -128,13 +128,9 @@ SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT, ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE), SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5, ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA), -SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5, - ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA), ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE), @@ -236,8 +232,6 @@ ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE); -ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE); -ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE); @@ -349,10 +343,6 @@ SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0, NULL, 0), -SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0, - NULL, 0), -SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0, - NULL, 0), SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0, NULL, 0), @@ -466,8 +456,6 @@ ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"), ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"), ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"), -ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"), -ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"), ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"), ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"), @@ -553,8 +541,6 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"), { name, "EQ4", "EQ4" }, \ { name, "DRC1L", "DRC1L" }, \ { name, "DRC1R", "DRC1R" }, \ - { name, "DRC2L", "DRC2L" }, \ - { name, "DRC2R", "DRC2R" }, \ { name, "LHPF1", "LHPF1" }, \ { name, "LHPF2", "LHPF2" }, \ { name, "LHPF3", "LHPF3" }, \ @@ -639,6 +625,15 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = { { "AIF2 Capture", NULL, "SYSCLK" }, { "AIF3 Capture", NULL, "SYSCLK" }, + { "IN1L PGA", NULL, "IN1L" }, + { "IN1R PGA", NULL, "IN1R" }, + + { "IN2L PGA", NULL, "IN2L" }, + { "IN2R PGA", NULL, "IN2R" }, + + { "IN3L PGA", NULL, "IN3L" }, + { "IN3R PGA", NULL, "IN3R" }, + ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"), ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"), ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), @@ -675,8 +670,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = { ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"), ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"), - ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"), - ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"), ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"), ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"), diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 8033f7065189..01ebbcc5c6a4 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -681,6 +681,18 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { { "AIF2 Capture", NULL, "SYSCLK" }, { "AIF3 Capture", NULL, "SYSCLK" }, + { "IN1L 
PGA", NULL, "IN1L" }, + { "IN1R PGA", NULL, "IN1R" }, + + { "IN2L PGA", NULL, "IN2L" }, + { "IN2R PGA", NULL, "IN2R" }, + + { "IN3L PGA", NULL, "IN3L" }, + { "IN3R PGA", NULL, "IN3R" }, + + { "IN4L PGA", NULL, "IN4L" }, + { "IN4R PGA", NULL, "IN4R" }, + ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"), ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"), ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index eaf65863ec21..ce6720073798 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -2501,6 +2501,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec, /* VMID 2*250k */ snd_soc_update_bits(codec, WM8962_PWR_MGMT_1, WM8962_VMID_SEL_MASK, 0x100); + + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) + msleep(100); break; case SND_SOC_BIAS_OFF: @@ -3730,21 +3733,6 @@ static int wm8962_runtime_resume(struct device *dev) regcache_sync(wm8962->regmap); - regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP, - WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA, - WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA); - - /* Bias enable at 2*50k for ramp */ - regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, - WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA, - WM8962_BIAS_ENA | 0x180); - - msleep(5); - - /* VMID back to 2x250k for standby */ - regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, - WM8962_VMID_SEL_MASK, 0x100); - return 0; } diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index bb62f4b3d563..6c9eeca85b95 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } - bclk_rate = params_rate(params) * 2; + bclk_rate = params_rate(params) * 4; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: bclk_rate *= 16; @@ -3253,10 +3253,13 @@ static void wm8994_mic_work(struct work_struct *work) int ret; int report; + pm_runtime_get_sync(dev); + ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, ®); if (ret < 0) { dev_err(dev, "Failed to read microphone status: %d\n", ret); + pm_runtime_put(dev); return; } @@ -3299,6 +3302,8 @@ static void wm8994_mic_work(struct work_struct *work) snd_soc_jack_report(priv->micdet[1].jack, report, SND_JACK_HEADSET | SND_JACK_BTN_0); + + pm_runtime_put(dev); } static irqreturn_t wm8994_mic_irq(int irq, void *data) @@ -3421,12 +3426,15 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data) int reg; bool present; + pm_runtime_get_sync(codec->dev); + mutex_lock(&wm8994->accdet_lock); reg = snd_soc_read(codec, WM1811_JACKDET_CTRL); if (reg < 0) { dev_err(codec->dev, "Failed to read jack status: %d\n", reg); mutex_unlock(&wm8994->accdet_lock); + pm_runtime_put(codec->dev); return IRQ_NONE; } @@ -3491,6 +3499,7 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data) SND_JACK_MECHANICAL | SND_JACK_HEADSET | wm8994->btn_mask); + pm_runtime_put(codec->dev); return IRQ_HANDLED; } @@ -3602,6 +3611,8 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA)) return IRQ_HANDLED; + pm_runtime_get_sync(codec->dev); + /* We may occasionally read a detection without an impedence * range being provided - if that happens loop again. 
*/ @@ -3612,6 +3623,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) dev_err(codec->dev, "Failed to read mic detect status: %d\n", reg); + pm_runtime_put(codec->dev); return IRQ_NONE; } @@ -3639,6 +3651,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) dev_warn(codec->dev, "Accessory detection with no callback\n"); out: + pm_runtime_put(codec->dev); return IRQ_HANDLED; } @@ -4025,6 +4038,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) break; case WM8958: if (wm8994->revision < 1) { + snd_soc_dapm_add_routes(dapm, wm8994_intercon, + ARRAY_SIZE(wm8994_intercon)); snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, ARRAY_SIZE(wm8994_revd_intercon)); snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 099e6ec32125..c6d2076a796b 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -148,7 +148,7 @@ SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1), SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1), SOC_ENUM("Capture Volume Steps", wm9712_enum[6]), -SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1), +SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 0), SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0), SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv), @@ -272,7 +272,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]); /* Mic select */ static const struct snd_kcontrol_new wm9712_mic_src_controls = -SOC_DAPM_ENUM("Route", wm9712_enum[7]); +SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]); /* diff select */ static const struct snd_kcontrol_new wm9712_diff_sel_controls = @@ -291,7 +291,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectl_controls), SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectr_controls), -SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0, +SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0, + &wm9712_mic_src_controls), +SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0, &wm9712_mic_src_controls), SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0, &wm9712_diff_sel_controls), @@ -319,6 +321,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0), SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0), SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0), SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0), +SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_OUTPUT("HPOUTL"), @@ -379,6 +382,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = { {"Mic PGA", NULL, "MIC1"}, {"Mic PGA", NULL, "MIC2"}, + /* microphones */ + {"Differential Mic", NULL, "MIC1"}, + {"Differential Mic", NULL, "MIC2"}, + {"Left Mic Select Source", "Mic 1", "MIC1"}, + {"Left Mic Select Source", "Mic 2", "MIC2"}, + {"Left Mic Select Source", "Stereo", "MIC1"}, + {"Left Mic Select Source", "Differential", "Differential Mic"}, + {"Right Mic Select Source", "Mic 1", "MIC1"}, + {"Right Mic Select Source", "Mic 2", "MIC2"}, + {"Right Mic Select Source", "Stereo", "MIC2"}, + {"Right Mic Select Source", "Differential", "Differential Mic"}, + /* left capture selector */ {"Left Capture Select", "Mic", "MIC1"}, {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"}, @@ -619,6 +634,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec) { int ret = 0; + 
codec->control_data = codec; /* we don't use regmap! */ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "wm9712: failed to register AC97 codec\n"); diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 3eb19fb71d17..d0b8a3287a85 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c @@ -1196,6 +1196,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) if (wm9713 == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, wm9713); + codec->control_data = wm9713; /* we don't use regmap! */ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 95441bfc8190..ce5e5cd254dd 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -380,14 +380,20 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev) static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream) { if (stream == SNDRV_PCM_STREAM_PLAYBACK) { - if (dev->txnumevt) /* enable FIFO */ + if (dev->txnumevt) { /* enable FIFO */ + mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, + FIFO_ENABLE); mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE); + } mcasp_start_tx(dev); } else { - if (dev->rxnumevt) /* enable FIFO */ + if (dev->rxnumevt) { /* enable FIFO */ + mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, + FIFO_ENABLE); mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE); + } mcasp_start_rx(dev); } } diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 28dd76c7cb1c..81d7728cf67f 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c @@ -380,13 +380,14 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai) static struct snd_soc_dai_driver imx_ssi_dai = { .probe = imx_ssi_dai_probe, .playback = { - .channels_min = 1, + /* The SSI does not support monaural audio. */ + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, diff --git a/sound/soc/mxs/Kconfig b/sound/soc/mxs/Kconfig index 99a997f19bb9..b6fa77678d97 100644 --- a/sound/soc/mxs/Kconfig +++ b/sound/soc/mxs/Kconfig @@ -10,7 +10,7 @@ menuconfig SND_MXS_SOC if SND_MXS_SOC config SND_SOC_MXS_SGTL5000 - tristate "SoC Audio support for i.MX boards with sgtl5000" + tristate "SoC Audio support for MXS boards with sgtl5000" depends on I2C select SND_SOC_SGTL5000 help diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index aba71bfa33b1..b3030718c228 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c @@ -394,9 +394,14 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); + struct mxs_saif *master_saif; u32 scr, stat; int ret; + master_saif = mxs_saif_get_master(saif); + if (!master_saif) + return -EINVAL; + /* mclk should already be set */ if (!saif->mclk && saif->mclk_in_use) { dev_err(cpu_dai->dev, "set mclk first\n"); @@ -420,6 +425,25 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, return ret; } + /* prepare clk in hw_param, enable in trigger */ + clk_prepare(saif->clk); + if (saif != master_saif) { + /* + * Set an initial clock rate for the saif internal logic to work + * properly. 
This is important when working in EXTMASTER mode + * that uses the other saif's BITCLK&LRCLK but it still needs a + * basic clock which should be fast enough for the internal + * logic. + */ + clk_enable(saif->clk); + ret = clk_set_rate(saif->clk, 24000000); + clk_disable(saif->clk); + if (ret) + return ret; + + clk_prepare(master_saif->clk); + } + scr = __raw_readl(saif->base + SAIF_CTRL); scr &= ~BM_SAIF_CTRL_WORD_LENGTH; diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c index 34835e8a9160..d33c48baaf71 100644 --- a/sound/soc/omap/mcbsp.c +++ b/sound/soc/omap/mcbsp.c @@ -745,7 +745,7 @@ int omap_mcbsp_6pin_src_mux(struct omap_mcbsp *mcbsp, u8 mux) { const char *signal, *src; - if (mcbsp->pdata->mux_signal) + if (!mcbsp->pdata->mux_signal) return -EINVAL; switch (mux) { diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index 1046083e90a0..acdd3ef14e08 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -820,3 +820,4 @@ module_platform_driver(asoc_mcbsp_driver); MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>"); MODULE_DESCRIPTION("OMAP I2S SoC Interface"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:omap-mcbsp"); diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index 5a649da9122a..f0feb06615f8 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c @@ -441,3 +441,4 @@ module_platform_driver(omap_pcm_driver); MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>"); MODULE_DESCRIPTION("OMAP PCM DMA module"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:omap-pcm-audio"); diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c index b7b2a1f91425..89b064650f14 100644 --- a/sound/soc/samsung/pcm.c +++ b/sound/soc/samsung/pcm.c @@ -20,7 +20,7 @@ #include <sound/pcm_params.h> #include <plat/audio.h> -#include <plat/dma.h> +#include <mach/dma.h> #include "dma.h" #include "pcm.h" diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index f219b2f7ee68..c501af6d8dbe 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -826,7 +826,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) } if (!rtd->cpu_dai) { - dev_dbg(card->dev, "CPU DAI %s not registered\n", + dev_err(card->dev, "CPU DAI %s not registered\n", dai_link->cpu_dai_name); return -EPROBE_DEFER; } @@ -857,14 +857,14 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) } if (!rtd->codec_dai) { - dev_dbg(card->dev, "CODEC DAI %s not registered\n", + dev_err(card->dev, "CODEC DAI %s not registered\n", dai_link->codec_dai_name); return -EPROBE_DEFER; } } if (!rtd->codec) { - dev_dbg(card->dev, "CODEC %s not registered\n", + dev_err(card->dev, "CODEC %s not registered\n", dai_link->codec_name); return -EPROBE_DEFER; } @@ -888,7 +888,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) rtd->platform = platform; } if (!rtd->platform) { - dev_dbg(card->dev, "platform %s not registered\n", + dev_err(card->dev, "platform %s not registered\n", dai_link->platform_name); return -EPROBE_DEFER; } @@ -1096,7 +1096,7 @@ static int soc_probe_codec(struct snd_soc_card *card, } /* If the driver didn't set I/O up try regmap */ - if (!codec->control_data) + if (!codec->write && dev_get_regmap(codec->dev, NULL)) snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); if (driver->controls) @@ -1481,6 +1481,8 @@ static int soc_check_aux_dev(struct snd_soc_card *card, int num) return 0; } + dev_err(card->dev, "%s not registered\n", aux_dev->codec_name); + return -EPROBE_DEFER; } diff --git 
a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c index 7f8b3b7428bb..0c172938b82a 100644 --- a/sound/soc/soc-jack.c +++ b/sound/soc/soc-jack.c @@ -103,7 +103,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) } /* Report before the DAPM sync to help users updating micbias status */ - blocking_notifier_call_chain(&jack->notifier, status, jack); + blocking_notifier_call_chain(&jack->notifier, jack->status, jack); snd_soc_dapm_sync(dapm); diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c index d684df294c0c..e463529b38bb 100644 --- a/sound/soc/tegra/tegra_alc5632.c +++ b/sound/soc/tegra/tegra_alc5632.c @@ -177,7 +177,7 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev) } alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); - if (alc5632->gpio_hp_det == -ENODEV) + if (alc5632->gpio_hp_det == -EPROBE_DEFER) return -EPROBE_DEFER; ret = snd_soc_of_parse_card_name(card, "nvidia,model"); diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c index 0c5bb33d258e..d4f14e492341 100644 --- a/sound/soc/tegra/tegra_wm8903.c +++ b/sound/soc/tegra/tegra_wm8903.c @@ -284,27 +284,27 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev) } else if (np) { pdata->gpio_spkr_en = of_get_named_gpio(np, "nvidia,spkr-en-gpios", 0); - if (pdata->gpio_spkr_en == -ENODEV) + if (pdata->gpio_spkr_en == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_hp_mute = of_get_named_gpio(np, "nvidia,hp-mute-gpios", 0); - if (pdata->gpio_hp_mute == -ENODEV) + if (pdata->gpio_hp_mute == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); - if (pdata->gpio_hp_det == -ENODEV) + if (pdata->gpio_hp_det == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_int_mic_en = of_get_named_gpio(np, "nvidia,int-mic-en-gpios", 0); - if (pdata->gpio_int_mic_en == -ENODEV) + if (pdata->gpio_int_mic_en == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_ext_mic_en = of_get_named_gpio(np, "nvidia,ext-mic-en-gpios", 0); - if (pdata->gpio_ext_mic_en == -ENODEV) + if (pdata->gpio_ext_mic_en == -EPROBE_DEFER) return -EPROBE_DEFER; } diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c index 62ac0285bfaf..057e28ef770e 100644 --- a/sound/soc/ux500/ux500_msp_dai.c +++ b/sound/soc/ux500/ux500_msp_dai.c @@ -21,7 +21,7 @@ #include <linux/mfd/dbx500-prcmu.h> #include <mach/hardware.h> -#include <mach/board-mop500-msp.h> +#include <mach/msp.h> #include <sound/soc.h> #include <sound/soc-dai.h> diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c index ee14d2dac2f5..5c472f335a64 100644 --- a/sound/soc/ux500/ux500_msp_i2s.c +++ b/sound/soc/ux500/ux500_msp_i2s.c @@ -19,7 +19,7 @@ #include <linux/slab.h> #include <mach/hardware.h> -#include <mach/board-mop500-msp.h> +#include <mach/msp.h> #include <sound/soc.h> diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h index 7f71b4a0d4bc..2d9136da9865 100644 --- a/sound/soc/ux500/ux500_msp_i2s.h +++ b/sound/soc/ux500/ux500_msp_i2s.h @@ -17,7 +17,7 @@ #include <linux/platform_device.h> -#include <mach/board-mop500-msp.h> +#include <mach/msp.h> #define MSP_INPUT_FREQ_APB 48000000 diff --git a/sound/sound_firmware.c b/sound/sound_firmware.c index 7e96249536b4..37711a5d0d6b 100644 --- a/sound/sound_firmware.c +++ b/sound/sound_firmware.c @@ -23,14 +23,14 @@ static int do_mod_firmware_load(const char *fn, char **fp) if (l <= 0 || l > 131072) { printk(KERN_INFO 
"Invalid firmware '%s'\n", fn); - filp_close(filp, current->files); + filp_close(filp, NULL); return 0; } dp = vmalloc(l); if (dp == NULL) { printk(KERN_INFO "Out of memory loading '%s'.\n", fn); - filp_close(filp, current->files); + filp_close(filp, NULL); return 0; } pos = 0; @@ -38,10 +38,10 @@ static int do_mod_firmware_load(const char *fn, char **fp) { printk(KERN_INFO "Failed to read '%s'.\n", fn); vfree(dp); - filp_close(filp, current->files); + filp_close(filp, NULL); return 0; } - filp_close(filp, current->files); + filp_close(filp, NULL); *fp = dp; return (int) l; } diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 379baad3d5ad..5e634a2eb282 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id) return 0; /* If a clock source can't tell us whether it's valid, we assume it is */ - if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID)) + if (!uac2_control_is_readable(cs_desc->bmControls, + UAC2_CS_CONTROL_CLOCK_VALID - 1)) return 1; err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 0f647d22cb4a..c41181202688 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -821,10 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) if (++ep->use_count != 1) return 0; - /* just to be sure */ - deactivate_urbs(ep, 0, 1); - wait_clear_urbs(ep); - ep->active_mask = 0; ep->unlink_mask = 0; ep->phase = 0; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index a1298f379428..62ec808ed792 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -544,6 +544,9 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) subs->last_frame_number = 0; runtime->delay = 0; + /* clear the pending deactivation on the target EPs */ + deactivate_endpoints(subs); + /* for playback, submit the URBs now; otherwise, the first hwptr_done * updates for all URBs would happen at the same time when starting */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 77f124fe57ad..35655c3a7b7a 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -319,6 +319,8 @@ LIB_H += $(ARCH_INCLUDE) LIB_H += util/cgroup.h LIB_H += $(TRACE_EVENT_DIR)event-parse.h LIB_H += util/target.h +LIB_H += util/rblist.h +LIB_H += util/intlist.h LIB_OBJS += $(OUTPUT)util/abspath.o LIB_OBJS += $(OUTPUT)util/alias.o @@ -383,6 +385,8 @@ LIB_OBJS += $(OUTPUT)util/xyarray.o LIB_OBJS += $(OUTPUT)util/cpumap.o LIB_OBJS += $(OUTPUT)util/cgroup.o LIB_OBJS += $(OUTPUT)util/target.o +LIB_OBJS += $(OUTPUT)util/rblist.o +LIB_OBJS += $(OUTPUT)util/intlist.o BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o @@ -983,7 +987,8 @@ clean: $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(MAKE) -C Documentation/ clean $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS - $(RM) $(OUTPUT)util/*-{bison,flex}* + $(RM) $(OUTPUT)util/*-bison* + $(RM) $(OUTPUT)util/*-flex* $(python-clean) .PHONY: all install clean strip $(LIBTRACEEVENT) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f5a6452931e6..4db6e1ba54e3 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -313,7 +313,7 @@ try_again: } } - perf_session__update_sample_type(session); + perf_session__set_id_hdr_size(session); } static int process_buildids(struct perf_record *rec) @@ -844,8 +844,6 @@ int cmd_record(int argc, const char **argv, 
const char *prefix __used) struct perf_record *rec = &record; char errbuf[BUFSIZ]; - perf_header__set_cmdline(argc, argv); - evsel_list = perf_evlist__new(NULL, NULL); if (evsel_list == NULL) return -ENOMEM; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 69b1c1185159..7c88a243b5db 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -249,8 +249,9 @@ static int process_read_event(struct perf_tool *tool, static int perf_report__setup_sample_type(struct perf_report *rep) { struct perf_session *self = rep->session; + u64 sample_type = perf_evlist__sample_type(self->evlist); - if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { + if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__error("Selected --sort parent, but no " "callchain data. Did you call " @@ -274,7 +275,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) if (sort__branch_mode == 1) { if (!self->fd_pipe && - !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { + !(sample_type & PERF_SAMPLE_BRANCH_STACK)) { ui__error("Selected -b but no branch data. " "Did you call perf record without -b?\n"); return -1; diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index d909eb74a0eb..1d592f5cbea9 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -478,7 +478,6 @@ static int test__basic_mmap(void) unsigned int nr_events[nsyscalls], expected_nr_events[nsyscalls], i, j; struct perf_evsel *evsels[nsyscalls], *evsel; - int sample_size = __perf_evsel__sample_size(attr.sample_type); for (i = 0; i < nsyscalls; ++i) { char name[64]; @@ -563,8 +562,7 @@ static int test__basic_mmap(void) goto out_munmap; } - err = perf_event__parse_sample(event, attr.sample_type, sample_size, - false, &sample, false); + err = perf_evlist__parse_sample(evlist, event, &sample, false); if (err) { pr_err("Can't parse sample, err = %d\n", err); goto out_munmap; @@ -661,12 +659,12 @@ static int test__PERF_RECORD(void) const char *cmd = "sleep"; const char *argv[] = { cmd, "1", NULL, }; char *bname; - u64 sample_type, prev_time = 0; + u64 prev_time = 0; bool found_cmd_mmap = false, found_libc_mmap = false, found_vdso_mmap = false, found_ld_mmap = false; - int err = -1, errs = 0, i, wakeups = 0, sample_size; + int err = -1, errs = 0, i, wakeups = 0; u32 cpu; int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; @@ -757,13 +755,6 @@ static int test__PERF_RECORD(void) } /* - * We'll need these two to parse the PERF_SAMPLE_* fields in each - * event. - */ - sample_type = perf_evlist__sample_type(evlist); - sample_size = __perf_evsel__sample_size(sample_type); - - /* * Now that all is properly set up, enable the events, they will * count just on workload.pid, which will start... 
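The tools/perf changes in this series replace perf_event__parse_sample(), which every caller had to feed the sample_type and a recomputed sample_size, with perf_evsel__parse_sample(): the size is derived once when the evsel is initialised, and parsing goes through the evsel (or the evlist's first evsel). A simplified sketch of that shape; the real size calculation only walks PERF_SAMPLE_MASK bits, and the names here are shortened:

#include <stdio.h>
#include <stdint.h>

struct evsel {
	uint64_t sample_type;
	int      sample_size;	/* derived once at init, not per event */
};

static int sample_size_of(uint64_t type)
{
	int size = 0, bit;

	for (bit = 0; bit < 64; bit++)	/* one u64 per enabled sample field */
		if (type & (1ULL << bit))
			size += sizeof(uint64_t);
	return size;
}

static void evsel_init(struct evsel *e, uint64_t sample_type)
{
	e->sample_type = sample_type;
	e->sample_size = sample_size_of(sample_type);
}

static int evsel_parse_sample(const struct evsel *e, int event_size)
{
	/* the bounds check that previously needed an explicit size argument */
	return e->sample_size <= event_size ? 0 : -1;
}

int main(void)
{
	struct evsel e;

	evsel_init(&e, 0x7);	/* e.g. PERF_SAMPLE_IP | TID | TIME */
	printf("sample_size=%d, parse ok=%d\n",
	       e.sample_size, evsel_parse_sample(&e, 64) == 0);
	return 0;
}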
*/ @@ -788,9 +779,7 @@ static int test__PERF_RECORD(void) if (type < PERF_RECORD_MAX) nr_events[type]++; - err = perf_event__parse_sample(event, sample_type, - sample_size, true, - &sample, false); + err = perf_evlist__parse_sample(evlist, event, &sample, false); if (err < 0) { if (verbose) perf_event__fprintf(event, stderr); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 35e86c6df713..68cd61ef6ac5 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -38,6 +38,7 @@ #include "util/cpumap.h" #include "util/xyarray.h" #include "util/sort.h" +#include "util/intlist.h" #include "util/debug.h" @@ -706,8 +707,16 @@ static void perf_event__process_sample(struct perf_tool *tool, int err; if (!machine && perf_guest) { - pr_err("Can't find guest [%d]'s kernel information\n", - event->ip.pid); + static struct intlist *seen; + + if (!seen) + seen = intlist__new(); + + if (!intlist__has_entry(seen, event->ip.pid)) { + pr_err("Can't find guest [%d]'s kernel information\n", + event->ip.pid); + intlist__add(seen, event->ip.pid); + } return; } @@ -811,7 +820,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) int ret; while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { - ret = perf_session__parse_sample(session, event, &sample); + ret = perf_evlist__parse_sample(top->evlist, event, &sample, false); if (ret) { pr_err("Can't parse sample, err = %d\n", ret); continue; @@ -943,8 +952,10 @@ try_again: * based cpu-clock-tick sw counter, which * is always available even if no PMU support: */ - if (attr->type == PERF_TYPE_HARDWARE && - attr->config == PERF_COUNT_HW_CPU_CYCLES) { + if ((err == ENOENT || err == ENXIO) && + (attr->type == PERF_TYPE_HARDWARE) && + (attr->config == PERF_COUNT_HW_CPU_CYCLES)) { + if (verbose) ui__warning("Cycles event not supported,\n" "trying to fall back to cpu-clock-ticks\n"); @@ -1032,7 +1043,7 @@ static int __cmd_top(struct perf_top *top) &top->session->host_machine); perf_top__start_counters(top); top->session->evlist = top->evlist; - perf_session__update_sample_type(top->session); + perf_session__set_id_hdr_size(top->session); /* Wait for a minimal set of events before starting the snapshot */ poll(top->evlist->pollfd, top->evlist->nr_fds, 100); diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 1b197280c621..d84870b06426 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -197,9 +197,6 @@ int perf_event__preprocess_sample(const union perf_event *self, const char *perf_event__name(unsigned int id); -int perf_event__parse_sample(const union perf_event *event, u64 type, - int sample_size, bool sample_id_all, - struct perf_sample *sample, bool swapped); int perf_event__synthesize_sample(union perf_event *event, u64 type, const struct perf_sample *sample, bool swapped); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 3edfd3483816..9b38681add9e 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -881,3 +881,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist) return 0; } + +int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, + struct perf_sample *sample, bool swapped) +{ + struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node); + return perf_evsel__parse_sample(e, event, sample, swapped); +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 40d4d3cdced0..528c1acd9298 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ 
-122,6 +122,9 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist); bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist); u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); +int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, + struct perf_sample *sample, bool swapped); + bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e81771364867..2eaae140def2 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -20,7 +20,7 @@ #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0)) -int __perf_evsel__sample_size(u64 sample_type) +static int __perf_evsel__sample_size(u64 sample_type) { u64 mask = sample_type & PERF_SAMPLE_MASK; int size = 0; @@ -53,6 +53,7 @@ void perf_evsel__init(struct perf_evsel *evsel, evsel->attr = *attr; INIT_LIST_HEAD(&evsel->node); hists__init(&evsel->hists); + evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); } struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) @@ -728,10 +729,10 @@ static bool sample_overlap(const union perf_event *event, return false; } -int perf_event__parse_sample(const union perf_event *event, u64 type, - int sample_size, bool sample_id_all, +int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, struct perf_sample *data, bool swapped) { + u64 type = evsel->attr.sample_type; const u64 *array; /* @@ -746,14 +747,14 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, data->period = 1; if (event->header.type != PERF_RECORD_SAMPLE) { - if (!sample_id_all) + if (!evsel->attr.sample_id_all) return 0; return perf_event__parse_id_sample(event, type, data, swapped); } array = event->sample.array; - if (sample_size + sizeof(event->header) > event->header.size) + if (evsel->sample_size + sizeof(event->header) > event->header.size) return -EFAULT; if (type & PERF_SAMPLE_IP) { @@ -895,7 +896,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u.val32[1] = sample->tid; if (swapped) { /* - * Inverse of what is done in perf_event__parse_sample + * Inverse of what is done in perf_evsel__parse_sample */ u.val32[0] = bswap_32(u.val32[0]); u.val32[1] = bswap_32(u.val32[1]); @@ -930,7 +931,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u.val32[0] = sample->cpu; if (swapped) { /* - * Inverse of what is done in perf_event__parse_sample + * Inverse of what is done in perf_evsel__parse_sample */ u.val32[0] = bswap_32(u.val32[0]); u.val64 = bswap_64(u.val64); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 67cc5033d192..b559929983bb 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -65,6 +65,7 @@ struct perf_evsel { void *func; void *data; } handler; + unsigned int sample_size; bool supported; }; @@ -177,13 +178,8 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel, return __perf_evsel__read(evsel, ncpus, nthreads, true); } -int __perf_evsel__sample_size(u64 sample_type); - -static inline int perf_evsel__sample_size(struct perf_evsel *evsel) -{ - return __perf_evsel__sample_size(evsel->attr.sample_type); -} - void hists__init(struct hists *hists); +int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *sample, bool 
swapped); #endif /* __PERF_EVSEL_H */ diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 3a6d20443330..74ea3c2f8138 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -174,6 +174,15 @@ perf_header__set_cmdline(int argc, const char **argv) { int i; + /* + * If header_argv has already been set, do not override it. + * This allows a command to set the cmdline, parse args and + * then call another builtin function that implements a + * command -- e.g, cmd_kvm calling cmd_record. + */ + if (header_argv) + return 0; + header_argc = (u32)argc; /* do not include NULL termination */ diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c new file mode 100644 index 000000000000..fd530dced9cb --- /dev/null +++ b/tools/perf/util/intlist.c @@ -0,0 +1,101 @@ +/* + * Based on intlist.c by: + * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com> + * + * Licensed under the GPLv2. + */ + +#include <errno.h> +#include <stdlib.h> +#include <linux/compiler.h> + +#include "intlist.h" + +static struct rb_node *intlist__node_new(struct rblist *rblist __used, + const void *entry) +{ + int i = (int)((long)entry); + struct rb_node *rc = NULL; + struct int_node *node = malloc(sizeof(*node)); + + if (node != NULL) { + node->i = i; + rc = &node->rb_node; + } + + return rc; +} + +static void int_node__delete(struct int_node *ilist) +{ + free(ilist); +} + +static void intlist__node_delete(struct rblist *rblist __used, + struct rb_node *rb_node) +{ + struct int_node *node = container_of(rb_node, struct int_node, rb_node); + + int_node__delete(node); +} + +static int intlist__node_cmp(struct rb_node *rb_node, const void *entry) +{ + int i = (int)((long)entry); + struct int_node *node = container_of(rb_node, struct int_node, rb_node); + + return node->i - i; +} + +int intlist__add(struct intlist *ilist, int i) +{ + return rblist__add_node(&ilist->rblist, (void *)((long)i)); +} + +void intlist__remove(struct intlist *ilist __used, struct int_node *node) +{ + int_node__delete(node); +} + +struct int_node *intlist__find(struct intlist *ilist, int i) +{ + struct int_node *node = NULL; + struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); + + if (rb_node) + node = container_of(rb_node, struct int_node, rb_node); + + return node; +} + +struct intlist *intlist__new(void) +{ + struct intlist *ilist = malloc(sizeof(*ilist)); + + if (ilist != NULL) { + rblist__init(&ilist->rblist); + ilist->rblist.node_cmp = intlist__node_cmp; + ilist->rblist.node_new = intlist__node_new; + ilist->rblist.node_delete = intlist__node_delete; + } + + return ilist; +} + +void intlist__delete(struct intlist *ilist) +{ + if (ilist != NULL) + rblist__delete(&ilist->rblist); +} + +struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx) +{ + struct int_node *node = NULL; + struct rb_node *rb_node; + + rb_node = rblist__entry(&ilist->rblist, idx); + if (rb_node) + node = container_of(rb_node, struct int_node, rb_node); + + return node; +} diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h new file mode 100644 index 000000000000..6d63ab90db50 --- /dev/null +++ b/tools/perf/util/intlist.h @@ -0,0 +1,75 @@ +#ifndef __PERF_INTLIST_H +#define __PERF_INTLIST_H + +#include <linux/rbtree.h> +#include <stdbool.h> + +#include "rblist.h" + +struct int_node { + struct rb_node rb_node; + int i; +}; + +struct intlist { + struct rblist rblist; +}; + +struct intlist *intlist__new(void); +void intlist__delete(struct intlist *ilist); + +void intlist__remove(struct 
intlist *ilist, struct int_node *in); +int intlist__add(struct intlist *ilist, int i); + +struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx); +struct int_node *intlist__find(struct intlist *ilist, int i); + +static inline bool intlist__has_entry(struct intlist *ilist, int i) +{ + return intlist__find(ilist, i) != NULL; +} + +static inline bool intlist__empty(const struct intlist *ilist) +{ + return rblist__empty(&ilist->rblist); +} + +static inline unsigned int intlist__nr_entries(const struct intlist *ilist) +{ + return rblist__nr_entries(&ilist->rblist); +} + +/* For intlist iteration */ +static inline struct int_node *intlist__first(struct intlist *ilist) +{ + struct rb_node *rn = rb_first(&ilist->rblist.entries); + return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; +} +static inline struct int_node *intlist__next(struct int_node *in) +{ + struct rb_node *rn; + if (!in) + return NULL; + rn = rb_next(&in->rb_node); + return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; +} + +/** + * intlist_for_each - iterate over a intlist + * @pos: the &struct int_node to use as a loop cursor. + * @ilist: the &struct intlist for loop. + */ +#define intlist__for_each(pos, ilist) \ + for (pos = intlist__first(ilist); pos; pos = intlist__next(pos)) + +/** + * intlist_for_each_safe - iterate over a intlist safe against removal of + * int_node + * @pos: the &struct int_node to use as a loop cursor. + * @n: another &struct int_node to use as temporary storage. + * @ilist: the &struct intlist for loop. + */ +#define intlist__for_each_safe(pos, n, ilist) \ + for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\ + pos = n, n = intlist__next(n)) +#endif /* __PERF_INTLIST_H */ diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c index 1b997d2b89ce..127d648cc548 100644 --- a/tools/perf/util/parse-events-test.c +++ b/tools/perf/util/parse-events-test.c @@ -13,6 +13,9 @@ do { \ } \ } while (0) +#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \ + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) + static int test__checkevent_tracepoint(struct perf_evlist *evlist) { struct perf_evsel *evsel = list_entry(evlist->entries.next, @@ -21,8 +24,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", - (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) == - evsel->attr.sample_type); + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); return 0; } @@ -37,8 +39,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", - (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) - == evsel->attr.sample_type); + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); } @@ -428,8 +429,7 @@ static int test__checkevent_list(struct perf_evlist *evlist) evsel = list_entry(evsel->node.next, struct perf_evsel, node); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", - (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) == - evsel->attr.sample_type); + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); TEST_ASSERT_VAL("wrong 
sample_period", 1 == evsel->attr.sample_period); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c index 99d02aa57dbf..594f8fad5ecd 100644 --- a/tools/perf/util/parse-options.c +++ b/tools/perf/util/parse-options.c @@ -1,6 +1,7 @@ #include "util.h" #include "parse-options.h" #include "cache.h" +#include "header.h" #define OPT_SHORT 1 #define OPT_UNSET 2 @@ -413,6 +414,8 @@ int parse_options(int argc, const char **argv, const struct option *options, { struct parse_opt_ctx_t ctx; + perf_header__set_cmdline(argc, argv); + parse_options_start(&ctx, argc, argv, flags); switch (parse_options_step(&ctx, options, usagestr)) { case PARSE_OPT_HELP: diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index e03b58a48424..0688bfb6d280 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -797,17 +797,13 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, event = perf_evlist__mmap_read(evlist, cpu); if (event != NULL) { - struct perf_evsel *first; PyObject *pyevent = pyrf_event__new(event); struct pyrf_event *pevent = (struct pyrf_event *)pyevent; if (pyevent == NULL) return PyErr_NoMemory(); - first = list_entry(evlist->entries.next, struct perf_evsel, node); - err = perf_event__parse_sample(event, first->attr.sample_type, - perf_evsel__sample_size(first), - sample_id_all, &pevent->sample, false); + err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false); if (err) return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c new file mode 100644 index 000000000000..0171fb611004 --- /dev/null +++ b/tools/perf/util/rblist.c @@ -0,0 +1,107 @@ +/* + * Based on strlist.c by: + * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com> + * + * Licensed under the GPLv2. 
+ */ + +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> + +#include "rblist.h" + +int rblist__add_node(struct rblist *rblist, const void *new_entry) +{ + struct rb_node **p = &rblist->entries.rb_node; + struct rb_node *parent = NULL, *new_node; + + while (*p != NULL) { + int rc; + + parent = *p; + + rc = rblist->node_cmp(parent, new_entry); + if (rc > 0) + p = &(*p)->rb_left; + else if (rc < 0) + p = &(*p)->rb_right; + else + return -EEXIST; + } + + new_node = rblist->node_new(rblist, new_entry); + if (new_node == NULL) + return -ENOMEM; + + rb_link_node(new_node, parent, p); + rb_insert_color(new_node, &rblist->entries); + ++rblist->nr_entries; + + return 0; +} + +void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node) +{ + rb_erase(rb_node, &rblist->entries); + rblist->node_delete(rblist, rb_node); +} + +struct rb_node *rblist__find(struct rblist *rblist, const void *entry) +{ + struct rb_node **p = &rblist->entries.rb_node; + struct rb_node *parent = NULL; + + while (*p != NULL) { + int rc; + + parent = *p; + + rc = rblist->node_cmp(parent, entry); + if (rc > 0) + p = &(*p)->rb_left; + else if (rc < 0) + p = &(*p)->rb_right; + else + return parent; + } + + return NULL; +} + +void rblist__init(struct rblist *rblist) +{ + if (rblist != NULL) { + rblist->entries = RB_ROOT; + rblist->nr_entries = 0; + } + + return; +} + +void rblist__delete(struct rblist *rblist) +{ + if (rblist != NULL) { + struct rb_node *pos, *next = rb_first(&rblist->entries); + + while (next) { + pos = next; + next = rb_next(pos); + rb_erase(pos, &rblist->entries); + rblist->node_delete(rblist, pos); + } + free(rblist); + } +} + +struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx) +{ + struct rb_node *node; + + for (node = rb_first(&rblist->entries); node; node = rb_next(node)) { + if (!idx--) + return node; + } + + return NULL; +} diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h new file mode 100644 index 000000000000..6d0cae5ae83d --- /dev/null +++ b/tools/perf/util/rblist.h @@ -0,0 +1,47 @@ +#ifndef __PERF_RBLIST_H +#define __PERF_RBLIST_H + +#include <linux/rbtree.h> +#include <stdbool.h> + +/* + * create node structs of the form: + * struct my_node { + * struct rb_node rb_node; + * ... my data ... + * }; + * + * create list structs of the form: + * struct mylist { + * struct rblist rblist; + * ... my data ... 
+ * }; + */ + +struct rblist { + struct rb_root entries; + unsigned int nr_entries; + + int (*node_cmp)(struct rb_node *rbn, const void *entry); + struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry); + void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node); +}; + +void rblist__init(struct rblist *rblist); +void rblist__delete(struct rblist *rblist); +int rblist__add_node(struct rblist *rblist, const void *new_entry); +void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node); +struct rb_node *rblist__find(struct rblist *rblist, const void *entry); +struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx); + +static inline bool rblist__empty(const struct rblist *rblist) +{ + return rblist->nr_entries == 0; +} + +static inline unsigned int rblist__nr_entries(const struct rblist *rblist) +{ + return rblist->nr_entries; +} + +#endif /* __PERF_RBLIST_H */ diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8e4f0755d2aa..2437fb0b463a 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -80,14 +80,12 @@ out_close: return -1; } -void perf_session__update_sample_type(struct perf_session *self) +void perf_session__set_id_hdr_size(struct perf_session *session) { - self->sample_type = perf_evlist__sample_type(self->evlist); - self->sample_size = __perf_evsel__sample_size(self->sample_type); - self->sample_id_all = perf_evlist__sample_id_all(self->evlist); - self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); - self->host_machine.id_hdr_size = self->id_hdr_size; - machines__set_id_hdr_size(&self->machines, self->id_hdr_size); + u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); + + session->host_machine.id_hdr_size = id_hdr_size; + machines__set_id_hdr_size(&session->machines, id_hdr_size); } int perf_session__create_kernel_maps(struct perf_session *self) @@ -147,7 +145,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, if (mode == O_RDONLY) { if (perf_session__open(self, force) < 0) goto out_delete; - perf_session__update_sample_type(self); + perf_session__set_id_hdr_size(self); } else if (mode == O_WRONLY) { /* * In O_RDONLY mode this will be performed when reading the @@ -158,7 +156,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, } if (tool && tool->ordering_requires_timestamps && - tool->ordered_samples && !self->sample_id_all) { + tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); tool->ordered_samples = false; } @@ -673,7 +671,8 @@ static void flush_sample_queue(struct perf_session *s, if (iter->timestamp > limit) break; - ret = perf_session__parse_sample(s, iter->event, &sample); + ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample, + s->header.needs_swap); if (ret) pr_err("Can't parse sample, err = %d\n", ret); else @@ -865,16 +864,18 @@ static void perf_session__print_tstamp(struct perf_session *session, union perf_event *event, struct perf_sample *sample) { + u64 sample_type = perf_evlist__sample_type(session->evlist); + if (event->header.type != PERF_RECORD_SAMPLE && - !session->sample_id_all) { + !perf_evlist__sample_id_all(session->evlist)) { fputs("-1 -1 ", stdout); return; } - if ((session->sample_type & PERF_SAMPLE_CPU)) + if ((sample_type & PERF_SAMPLE_CPU)) printf("%u ", sample->cpu); - if (session->sample_type & PERF_SAMPLE_TIME) + if (sample_type & PERF_SAMPLE_TIME) printf("%" PRIu64 
" ", sample->time); } @@ -899,6 +900,8 @@ static void dump_event(struct perf_session *session, union perf_event *event, static void dump_sample(struct perf_session *session, union perf_event *event, struct perf_sample *sample) { + u64 sample_type; + if (!dump_trace) return; @@ -906,10 +909,12 @@ static void dump_sample(struct perf_session *session, union perf_event *event, event->header.misc, sample->pid, sample->tid, sample->ip, sample->period, sample->addr); - if (session->sample_type & PERF_SAMPLE_CALLCHAIN) + sample_type = perf_evlist__sample_type(session->evlist); + + if (sample_type & PERF_SAMPLE_CALLCHAIN) callchain__printf(sample); - if (session->sample_type & PERF_SAMPLE_BRANCH_STACK) + if (sample_type & PERF_SAMPLE_BRANCH_STACK) branch_stack__printf(sample); } @@ -1006,7 +1011,7 @@ static int perf_session__preprocess_sample(struct perf_session *session, union perf_event *event, struct perf_sample *sample) { if (event->header.type != PERF_RECORD_SAMPLE || - !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) + !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN)) return 0; if (!ip_callchain__valid(sample->callchain, event)) { @@ -1030,7 +1035,7 @@ static int perf_session__process_user_event(struct perf_session *session, union case PERF_RECORD_HEADER_ATTR: err = tool->attr(event, &session->evlist); if (err == 0) - perf_session__update_sample_type(session); + perf_session__set_id_hdr_size(session); return err; case PERF_RECORD_HEADER_EVENT_TYPE: return tool->event_type(tool, event); @@ -1065,7 +1070,7 @@ static int perf_session__process_event(struct perf_session *session, int ret; if (session->header.needs_swap) - event_swap(event, session->sample_id_all); + event_swap(event, perf_evlist__sample_id_all(session->evlist)); if (event->header.type >= PERF_RECORD_HEADER_MAX) return -EINVAL; @@ -1078,7 +1083,8 @@ static int perf_session__process_event(struct perf_session *session, /* * For all kernel events we get the sample data */ - ret = perf_session__parse_sample(session, event, &sample); + ret = perf_evlist__parse_sample(session->evlist, event, &sample, + session->header.needs_swap); if (ret) return ret; @@ -1389,9 +1395,9 @@ int perf_session__process_events(struct perf_session *self, return err; } -bool perf_session__has_traces(struct perf_session *self, const char *msg) +bool perf_session__has_traces(struct perf_session *session, const char *msg) { - if (!(self->sample_type & PERF_SAMPLE_RAW)) { + if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) { pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); return false; } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 7c435bde6eb0..1f7ec87db7d7 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -41,13 +41,9 @@ struct perf_session { * perf.data file. 
*/ struct hists hists; - u64 sample_type; - int sample_size; int fd; bool fd_pipe; bool repipe; - bool sample_id_all; - u16 id_hdr_size; int cwdlen; char *cwd; struct ordered_samples ordered_samples; @@ -86,7 +82,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr); int perf_session__create_kernel_maps(struct perf_session *self); -void perf_session__update_sample_type(struct perf_session *self); +void perf_session__set_id_hdr_size(struct perf_session *session); void perf_session__remove_thread(struct perf_session *self, struct thread *th); static inline @@ -130,24 +126,6 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); -static inline int perf_session__parse_sample(struct perf_session *session, - const union perf_event *event, - struct perf_sample *sample) -{ - return perf_event__parse_sample(event, session->sample_type, - session->sample_size, - session->sample_id_all, sample, - session->header.needs_swap); -} - -static inline int perf_session__synthesize_sample(struct perf_session *session, - union perf_event *event, - const struct perf_sample *sample) -{ - return perf_event__synthesize_sample(event, session->sample_type, - sample, session->header.needs_swap); -} - struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c index 6783a2043555..95856ff3dda4 100644 --- a/tools/perf/util/strlist.c +++ b/tools/perf/util/strlist.c @@ -10,23 +10,28 @@ #include <stdlib.h> #include <string.h> -static struct str_node *str_node__new(const char *s, bool dupstr) +static +struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry) { - struct str_node *self = malloc(sizeof(*self)); + const char *s = entry; + struct rb_node *rc = NULL; + struct strlist *strlist = container_of(rblist, struct strlist, rblist); + struct str_node *snode = malloc(sizeof(*snode)); - if (self != NULL) { - if (dupstr) { + if (snode != NULL) { + if (strlist->dupstr) { s = strdup(s); if (s == NULL) goto out_delete; } - self->s = s; + snode->s = s; + rc = &snode->rb_node; } - return self; + return rc; out_delete: - free(self); + free(snode); return NULL; } @@ -37,36 +42,26 @@ static void str_node__delete(struct str_node *self, bool dupstr) free(self); } -int strlist__add(struct strlist *self, const char *new_entry) +static +void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node) { - struct rb_node **p = &self->entries.rb_node; - struct rb_node *parent = NULL; - struct str_node *sn; - - while (*p != NULL) { - int rc; - - parent = *p; - sn = rb_entry(parent, struct str_node, rb_node); - rc = strcmp(sn->s, new_entry); - - if (rc > 0) - p = &(*p)->rb_left; - else if (rc < 0) - p = &(*p)->rb_right; - else - return -EEXIST; - } + struct strlist *slist = container_of(rblist, struct strlist, rblist); + struct str_node *snode = container_of(rb_node, struct str_node, rb_node); - sn = str_node__new(new_entry, self->dupstr); - if (sn == NULL) - return -ENOMEM; + str_node__delete(snode, slist->dupstr); +} - rb_link_node(&sn->rb_node, parent, p); - rb_insert_color(&sn->rb_node, &self->entries); - ++self->nr_entries; +static int strlist__node_cmp(struct rb_node *rb_node, const void *entry) +{ + const char *str = entry; + struct str_node *snode = container_of(rb_node, struct str_node, rb_node); + + return strcmp(snode->s, str); +} - return 0; +int strlist__add(struct strlist *self, const char 
*new_entry) +{ + return rblist__add_node(&self->rblist, new_entry); } int strlist__load(struct strlist *self, const char *filename) @@ -96,34 +91,20 @@ out: return err; } -void strlist__remove(struct strlist *self, struct str_node *sn) +void strlist__remove(struct strlist *slist, struct str_node *snode) { - rb_erase(&sn->rb_node, &self->entries); - str_node__delete(sn, self->dupstr); + str_node__delete(snode, slist->dupstr); } -struct str_node *strlist__find(struct strlist *self, const char *entry) +struct str_node *strlist__find(struct strlist *slist, const char *entry) { - struct rb_node **p = &self->entries.rb_node; - struct rb_node *parent = NULL; - - while (*p != NULL) { - struct str_node *sn; - int rc; - - parent = *p; - sn = rb_entry(parent, struct str_node, rb_node); - rc = strcmp(sn->s, entry); - - if (rc > 0) - p = &(*p)->rb_left; - else if (rc < 0) - p = &(*p)->rb_right; - else - return sn; - } + struct str_node *snode = NULL; + struct rb_node *rb_node = rblist__find(&slist->rblist, entry); - return NULL; + if (rb_node) + snode = container_of(rb_node, struct str_node, rb_node); + + return snode; } static int strlist__parse_list_entry(struct strlist *self, const char *s) @@ -156,9 +137,12 @@ struct strlist *strlist__new(bool dupstr, const char *slist) struct strlist *self = malloc(sizeof(*self)); if (self != NULL) { - self->entries = RB_ROOT; + rblist__init(&self->rblist); + self->rblist.node_cmp = strlist__node_cmp; + self->rblist.node_new = strlist__node_new; + self->rblist.node_delete = strlist__node_delete; + self->dupstr = dupstr; - self->nr_entries = 0; if (slist && strlist__parse_list(self, slist) != 0) goto out_error; } @@ -171,30 +155,18 @@ out_error: void strlist__delete(struct strlist *self) { - if (self != NULL) { - struct str_node *pos; - struct rb_node *next = rb_first(&self->entries); - - while (next) { - pos = rb_entry(next, struct str_node, rb_node); - next = rb_next(&pos->rb_node); - strlist__remove(self, pos); - } - self->entries = RB_ROOT; - free(self); - } + if (self != NULL) + rblist__delete(&self->rblist); } -struct str_node *strlist__entry(const struct strlist *self, unsigned int idx) +struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx) { - struct rb_node *nd; + struct str_node *snode = NULL; + struct rb_node *rb_node; - for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { - struct str_node *pos = rb_entry(nd, struct str_node, rb_node); + rb_node = rblist__entry(&slist->rblist, idx); + if (rb_node) + snode = container_of(rb_node, struct str_node, rb_node); - if (!idx--) - return pos; - } - - return NULL; + return snode; } diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index 3ba839007d2c..dd9f922ec67c 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h @@ -4,14 +4,15 @@ #include <linux/rbtree.h> #include <stdbool.h> +#include "rblist.h" + struct str_node { struct rb_node rb_node; const char *s; }; struct strlist { - struct rb_root entries; - unsigned int nr_entries; + struct rblist rblist; bool dupstr; }; @@ -32,18 +33,18 @@ static inline bool strlist__has_entry(struct strlist *self, const char *entry) static inline bool strlist__empty(const struct strlist *self) { - return self->nr_entries == 0; + return rblist__empty(&self->rblist); } static inline unsigned int strlist__nr_entries(const struct strlist *self) { - return self->nr_entries; + return rblist__nr_entries(&self->rblist); } /* For strlist iteration */ static inline struct str_node *strlist__first(struct strlist *self) { - 
struct rb_node *rn = rb_first(&self->entries); + struct rb_node *rn = rb_first(&self->rblist.entries); return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; } static inline struct str_node *strlist__next(struct str_node *sn) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index fdad4eeeb429..8b63b678e127 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -64,7 +64,7 @@ static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__NOT_FOUND, }; -#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab) +#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) static enum dso_binary_type binary_type_data[] = { DSO_BINARY_TYPE__BUILD_ID_CACHE, @@ -72,7 +72,7 @@ static enum dso_binary_type binary_type_data[] = { DSO_BINARY_TYPE__NOT_FOUND, }; -#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data) +#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data) int dso__name_len(const struct dso *dso) { @@ -2875,6 +2875,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) int i, items = 0; char path[PATH_MAX]; pid_t pid; + char *endp; if (symbol_conf.default_guest_vmlinux_name || symbol_conf.default_guest_modules || @@ -2891,7 +2892,14 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) /* Filter out . and .. */ continue; } - pid = atoi(namelist[i]->d_name); + pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); + if ((*endp != '\0') || + (endp == namelist[i]->d_name) || + (errno == ERANGE)) { + pr_debug("invalid directory (%s). Skipping.\n", + namelist[i]->d_name); + continue; + } sprintf(path, "%s/%s/proc/kallsyms", symbol_conf.guestmount, namelist[i]->d_name); diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c index 3f59c496e64c..051eaa68095e 100644 --- a/tools/perf/util/target.c +++ b/tools/perf/util/target.c @@ -110,7 +110,7 @@ int perf_target__strerror(struct perf_target *target, int errnum, int idx; const char *msg; - BUG_ON(buflen > 0); + BUG_ON(buflen == 0); if (errnum >= 0) { const char *err = strerror_r(errnum, buf, buflen); |
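
Note on the sample-parsing consolidation in the perf hunks above: callers no longer carry sample_type/sample_size around by hand; they pass the evlist to perf_evlist__parse_sample(), which picks the first evsel and uses the sample_size now cached at perf_evsel__init() time. A minimal sketch of the resulting call pattern in a tool's mmap-read loop follows; drain_mmap() is a hypothetical helper and the loop body is illustrative only, but perf_evlist__mmap_read(), perf_evlist__parse_sample() and struct perf_sample are the interfaces introduced or kept by this series:

    #include <stdbool.h>
    #include "util/evlist.h"
    #include "util/event.h"
    #include "util/debug.h"

    /* Drain one mmap ring and parse each sample via the evlist. */
    static void drain_mmap(struct perf_evlist *evlist, int idx)
    {
        union perf_event *event;
        struct perf_sample sample;

        while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
            /* The evlist supplies sample_type, sample_size and sample_id_all. */
            if (perf_evlist__parse_sample(evlist, event, &sample, false)) {
                pr_err("Can't parse sample\n");
                continue;
            }
            /* consume sample.ip, sample.pid, sample.time, ... */
        }
    }
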
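Note on the new rblist layering: the rblist.h header comment above describes the intended pattern -- embed a struct rb_node in the node type, embed a struct rblist in the list type, and supply node_new/node_cmp/node_delete callbacks -- which is exactly how intlist.c and the reworked strlist.c are built. A minimal sketch of a third user written the same way is shown below; "addrlist" and its helpers are hypothetical names that are not part of this series, and the sketch assumes the perf-tree headers added or already present here (rblist.h, linux/rbtree.h, container_of from linux/kernel.h):

    #include <stdlib.h>
    #include <stdint.h>
    #include <linux/kernel.h>
    #include <linux/rbtree.h>
    #include "rblist.h"

    struct addr_node {
        struct rb_node rb_node;
        uint64_t addr;
    };

    struct addrlist {
        struct rblist rblist;
    };

    /* 'entry' is only dereferenced during the rblist call, so passing a
     * pointer to a caller-local value is safe; nothing retains it. */
    static struct rb_node *addrlist__node_new(struct rblist *rblist, const void *entry)
    {
        struct addr_node *node = malloc(sizeof(*node));

        (void)rblist;
        if (node == NULL)
            return NULL;

        node->addr = *(const uint64_t *)entry;
        return &node->rb_node;
    }

    static void addrlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
    {
        (void)rblist;
        free(container_of(rb_node, struct addr_node, rb_node));
    }

    /* Same sign convention as strcmp(existing, new), matching rblist__add_node(). */
    static int addrlist__node_cmp(struct rb_node *rb_node, const void *entry)
    {
        struct addr_node *node = container_of(rb_node, struct addr_node, rb_node);
        uint64_t addr = *(const uint64_t *)entry;

        return node->addr < addr ? -1 : (node->addr > addr ? 1 : 0);
    }

    struct addrlist *addrlist__new(void)
    {
        struct addrlist *alist = malloc(sizeof(*alist));

        if (alist != NULL) {
            rblist__init(&alist->rblist);
            alist->rblist.node_cmp = addrlist__node_cmp;
            alist->rblist.node_new = addrlist__node_new;
            alist->rblist.node_delete = addrlist__node_delete;
        }

        return alist;
    }

    int addrlist__add(struct addrlist *alist, uint64_t addr)
    {
        return rblist__add_node(&alist->rblist, &addr);
    }

    int addrlist__has_entry(struct addrlist *alist, uint64_t addr)
    {
        return rblist__find(&alist->rblist, &addr) != NULL;
    }

As with intlist, duplicate insertions are rejected with -EEXIST by rblist__add_node(), so a sketch like this can serve the same "warn only once per key" purpose that builtin-top.c uses intlist for above.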