125 files changed, 5766 insertions, 1731 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus new file mode 100644 index 000000000000..636e938d5e33 --- /dev/null +++ b/Documentation/ABI/stable/sysfs-bus-vmbus @@ -0,0 +1,29 @@ +What: /sys/bus/vmbus/devices/vmbus_*/id +Date: Jul 2009 +KernelVersion: 2.6.31 +Contact: K. Y. Srinivasan <kys@microsoft.com> +Description: The VMBus child_relid of the device's primary channel +Users: tools/hv/lsvmbus + +What: /sys/bus/vmbus/devices/vmbus_*/class_id +Date: Jul 2009 +KernelVersion: 2.6.31 +Contact: K. Y. Srinivasan <kys@microsoft.com> +Description: The VMBus interface type GUID of the device +Users: tools/hv/lsvmbus + +What: /sys/bus/vmbus/devices/vmbus_*/device_id +Date: Jul 2009 +KernelVersion: 2.6.31 +Contact: K. Y. Srinivasan <kys@microsoft.com> +Description: The VMBus interface instance GUID of the device +Users: tools/hv/lsvmbus + +What: /sys/bus/vmbus/devices/vmbus_*/channel_vp_mapping +Date: Jul 2015 +KernelVersion: 4.2.0 +Contact: K. Y. Srinivasan <kys@microsoft.com> +Description: The mapping of which primary/sub channels are bound to which + Virtual Processors. + Format: <channel's child_relid:the bound cpu's number> +Users: tools/hv/lsvmbus diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x index b4d0b99afffb..d72ca1736ba4 100644 --- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x @@ -112,7 +112,7 @@ KernelVersion: 3.19 Contact: Mathieu Poirier <mathieu.poirier@linaro.org> Description: (RW) Mask to apply to all the context ID comparator. -What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_val +What: /sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_pid Date: November 2014 KernelVersion: 3.19 Contact: Mathieu Poirier <mathieu.poirier@linaro.org> diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x index 2fe2e3dae487..2355ed8ae31f 100644 --- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x @@ -249,7 +249,7 @@ KernelVersion: 4.01 Contact: Mathieu Poirier <mathieu.poirier@linaro.org> Description: (RW) Select which context ID comparator to work with. -What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_val +What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_pid Date: April 2015 KernelVersion: 4.01 Contact: Mathieu Poirier <mathieu.poirier@linaro.org> diff --git a/Documentation/ABI/testing/sysfs-driver-sunxi-sid b/Documentation/ABI/testing/sysfs-driver-sunxi-sid deleted file mode 100644 index ffb9536f6ecc..000000000000 --- a/Documentation/ABI/testing/sysfs-driver-sunxi-sid +++ /dev/null @@ -1,22 +0,0 @@ -What: /sys/devices/*/<our-device>/eeprom -Date: August 2013 -Contact: Oliver Schinagl <oliver@schinagl.nl> -Description: read-only access to the SID (Security-ID) on current - A-series SoC's from Allwinner. Currently supports A10, A10s, A13 - and A20 CPU's. The earlier A1x series of SoCs exports 16 bytes, - whereas the newer A20 SoC exposes 512 bytes split into sections. - Besides the 16 bytes of SID, there's also an SJTAG area, - HDMI-HDCP key and some custom keys. 
Below a quick overview, for - details see the user manual: - 0x000 128 bit root-key (sun[457]i) - 0x010 128 bit boot-key (sun7i) - 0x020 64 bit security-jtag-key (sun7i) - 0x028 16 bit key configuration (sun7i) - 0x02b 16 bit custom-vendor-key (sun7i) - 0x02c 320 bit low general key (sun7i) - 0x040 32 bit read-control access (sun7i) - 0x064 224 bit low general key (sun7i) - 0x080 2304 bit HDCP-key (sun7i) - 0x1a0 768 bit high general key (sun7i) -Users: any user space application which wants to read the SID on - Allwinner's A-series of CPU's. diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt index 65a6db2271a2..62938eb9697f 100644 --- a/Documentation/devicetree/bindings/arm/coresight.txt +++ b/Documentation/devicetree/bindings/arm/coresight.txt @@ -17,6 +17,7 @@ its hardware characteristcs. - "arm,coresight-tmc", "arm,primecell"; - "arm,coresight-funnel", "arm,primecell"; - "arm,coresight-etm3x", "arm,primecell"; + - "arm,coresight-etm4x", "arm,primecell"; - "qcom,coresight-replicator1x", "arm,primecell"; * reg: physical base address and length of the register diff --git a/Documentation/devicetree/bindings/misc/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt index fabdf64a5737..d543ed3f5363 100644 --- a/Documentation/devicetree/bindings/misc/allwinner,sunxi-sid.txt +++ b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt @@ -4,6 +4,10 @@ Required properties: - compatible: "allwinner,sun4i-a10-sid" or "allwinner,sun7i-a20-sid" - reg: Should contain registers location and length += Data cells = +Are child nodes of qfprom, bindings of which as described in +bindings/nvmem/nvmem.txt + Example for sun4i: sid@01c23800 { compatible = "allwinner,sun4i-a10-sid"; diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.txt b/Documentation/devicetree/bindings/nvmem/nvmem.txt new file mode 100644 index 000000000000..b52bc11e9597 --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/nvmem.txt @@ -0,0 +1,80 @@ += NVMEM(Non Volatile Memory) Data Device Tree Bindings = + +This binding is intended to represent the location of hardware +configuration data stored in NVMEMs like eeprom, efuses and so on. + +On a significant proportion of boards, the manufacturer has stored +some data on NVMEM, for the OS to be able to retrieve these information +and act upon it. Obviously, the OS has to know about where to retrieve +these data from, and where they are stored on the storage device. + +This document is here to document this. + += Data providers = +Contains bindings specific to provider drivers and data cells as children +of this node. + +Optional properties: + read-only: Mark the provider as read only. + += Data cells = +These are the child nodes of the provider which contain data cell +information like offset and size in nvmem provider. + +Required properties: +reg: specifies the offset in byte within the storage device. + +Optional properties: + +bits: Is pair of bit location and number of bits, which specifies offset + in bit and number of bits within the address range specified by reg property. + Offset takes values from 0-7. + +For example: + + /* Provider */ + qfprom: qfprom@00700000 { + ... 
+ + /* Data cells */ + tsens_calibration: calib@404 { + reg = <0x404 0x10>; + }; + + tsens_calibration_bckp: calib_bckp@504 { + reg = <0x504 0x11>; + bits = <6 128> + }; + + pvs_version: pvs-version@6 { + reg = <0x6 0x2> + bits = <7 2> + }; + + speed_bin: speed-bin@c{ + reg = <0xc 0x1>; + bits = <2 3>; + + }; + ... + }; + += Data consumers = +Are device nodes which consume nvmem data cells/providers. + +Required-properties: +nvmem-cells: list of phandle to the nvmem data cells. +nvmem-cell-names: names for the each nvmem-cells specified. Required if + nvmem-cells is used. + +Optional-properties: +nvmem : list of phandles to nvmem providers. +nvmem-names: names for the each nvmem provider. required if nvmem is used. + +For example: + + tsens { + ... + nvmem-cells = <&tsens_calibration>; + nvmem-cell-names = "calibration"; + }; diff --git a/Documentation/devicetree/bindings/nvmem/qfprom.txt b/Documentation/devicetree/bindings/nvmem/qfprom.txt new file mode 100644 index 000000000000..4ad68b7f5c18 --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/qfprom.txt @@ -0,0 +1,35 @@ += Qualcomm QFPROM device tree bindings = + +This binding is intended to represent QFPROM which is found in most QCOM SOCs. + +Required properties: +- compatible: should be "qcom,qfprom" +- reg: Should contain registers location and length + += Data cells = +Are child nodes of qfprom, bindings of which as described in +bindings/nvmem/nvmem.txt + +Example: + + qfprom: qfprom@00700000 { + compatible = "qcom,qfprom"; + reg = <0x00700000 0x8000>; + ... + /* Data cells */ + tsens_calibration: calib@404 { + reg = <0x4404 0x10>; + }; + }; + + += Data consumers = +Are device nodes which consume nvmem data cells. + +For example: + + tsens { + ... + nvmem-cells = <&tsens_calibration>; + nvmem-cell-names = "calibration"; + }; diff --git a/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt b/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt new file mode 100644 index 000000000000..0e6d8754e7ec --- /dev/null +++ b/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt @@ -0,0 +1,48 @@ +Qualcomm Coincell Charger: + +The hardware block controls charging for a coincell or capacitor that is +used to provide power backup for certain features of the power management +IC (PMIC) + +- compatible: + Usage: required + Value type: <string> + Definition: must be: "qcom,pm8941-coincell" + +- reg: + Usage: required + Value type: <u32> + Definition: base address of the coincell charger registers + +- qcom,rset-ohms: + Usage: required + Value type: <u32> + Definition: resistance (in ohms) for current-limiting resistor + must be one of: 800, 1200, 1700, 2100 + +- qcom,vset-millivolts: + Usage: required + Value type: <u32> + Definition: voltage (in millivolts) to apply for charging + must be one of: 2500, 3000, 3100, 3200 + +- qcom,charger-disable: + Usage: optional + Value type: <boolean> + Definition: definining this property disables charging + +This charger is a sub-node of one of the 8941 PMIC blocks, and is specified +as a child node in DTS of that node. 
See ../mfd/qcom,spmi-pmic.txt and +../mfd/qcom-pm8xxx.txt + +Example: + + pm8941@0 { + coincell@2800 { + compatible = "qcom,pm8941-coincell"; + reg = <0x2800>; + + qcom,rset-ohms = <2100>; + qcom,vset-millivolts = <3000>; + }; + }; diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 611c52267d24..141f847c7648 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -124,6 +124,8 @@ Code Seq#(hex) Include File Comments 'H' 00-7F linux/hiddev.h conflict! 'H' 00-0F linux/hidraw.h conflict! 'H' 01 linux/mei.h conflict! +'H' 02 linux/mei.h conflict! +'H' 03 linux/mei.h conflict! 'H' 00-0F sound/asound.h conflict! 'H' 20-40 sound/asound_fm.h conflict! 'H' 80-8F sound/sfnt_info.h conflict! diff --git a/Documentation/misc-devices/mei/mei.txt b/Documentation/misc-devices/mei/mei.txt index 8d47501bba0a..91c1fa34f48b 100644 --- a/Documentation/misc-devices/mei/mei.txt +++ b/Documentation/misc-devices/mei/mei.txt @@ -96,7 +96,7 @@ A code snippet for an application communicating with Intel AMTHI client: IOCTL ===== -The Intel MEI Driver supports the following IOCTL command: +The Intel MEI Driver supports the following IOCTL commands: IOCTL_MEI_CONNECT_CLIENT Connect to firmware Feature (client). usage: @@ -125,6 +125,49 @@ The Intel MEI Driver supports the following IOCTL command: data that can be sent or received. (e.g. if MTU=2K, can send requests up to bytes 2k and received responses up to 2k bytes). + IOCTL_MEI_NOTIFY_SET: enable or disable event notifications + + Usage: + uint32_t enable; + ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable); + + Inputs: + uint32_t enable = 1; + or + uint32_t enable[disable] = 0; + + Error returns: + EINVAL Wrong IOCTL Number + ENODEV Device is not initialized or the client not connected + ENOMEM Unable to allocate memory to client internal data. + EFAULT Fatal Error (e.g. Unable to access user input data) + EOPNOTSUPP if the device doesn't support the feature + + Notes: + The client must be connected in order to enable notification events + + + IOCTL_MEI_NOTIFY_GET : retrieve event + + Usage: + uint32_t event; + ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event); + + Outputs: + 1 - if an event is pending + 0 - if there is no even pending + + Error returns: + EINVAL Wrong IOCTL Number + ENODEV Device is not initialized or the client not connected + ENOMEM Unable to allocate memory to client internal data. + EFAULT Fatal Error (e.g. Unable to access user input data) + EOPNOTSUPP if the device doesn't support the feature + + Notes: + The client must be connected and event notification has to be enabled + in order to receive an event + Intel ME Applications ===================== diff --git a/Documentation/nvmem/nvmem.txt b/Documentation/nvmem/nvmem.txt new file mode 100644 index 000000000000..dbd40d879239 --- /dev/null +++ b/Documentation/nvmem/nvmem.txt @@ -0,0 +1,152 @@ + NVMEM SUBSYSTEM + Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + +This document explains the NVMEM Framework along with the APIs provided, +and how to use it. + +1. Introduction +=============== +*NVMEM* is the abbreviation for Non Volatile Memory layer. It is used to +retrieve configuration of SOC or Device specific data from non volatile +memories like eeprom, efuses and so on. 
+ +Before this framework existed, NVMEM drivers like eeprom were stored in +drivers/misc, where they all had to duplicate pretty much the same code to +register a sysfs file, allow in-kernel users to access the content of the +devices they were driving, etc. + +This was also a problem as far as other in-kernel users were involved, since +the solutions used were pretty much different from one driver to another, there +was a rather big abstraction leak. + +This framework aims at solve these problems. It also introduces DT +representation for consumer devices to go get the data they require (MAC +Addresses, SoC/Revision ID, part numbers, and so on) from the NVMEMs. This +framework is based on regmap, so that most of the abstraction available in +regmap can be reused, across multiple types of buses. + +NVMEM Providers ++++++++++++++++ + +NVMEM provider refers to an entity that implements methods to initialize, read +and write the non-volatile memory. + +2. Registering/Unregistering the NVMEM provider +=============================================== + +A NVMEM provider can register with NVMEM core by supplying relevant +nvmem configuration to nvmem_register(), on success core would return a valid +nvmem_device pointer. + +nvmem_unregister(nvmem) is used to unregister a previously registered provider. + +For example, a simple qfprom case: + +static struct nvmem_config econfig = { + .name = "qfprom", + .owner = THIS_MODULE, +}; + +static int qfprom_probe(struct platform_device *pdev) +{ + ... + econfig.dev = &pdev->dev; + nvmem = nvmem_register(&econfig); + ... +} + +It is mandatory that the NVMEM provider has a regmap associated with its +struct device. Failure to do would return error code from nvmem_register(). + +NVMEM Consumers ++++++++++++++++ + +NVMEM consumers are the entities which make use of the NVMEM provider to +read from and to NVMEM. + +3. NVMEM cell based consumer APIs +================================= + +NVMEM cells are the data entries/fields in the NVMEM. +The NVMEM framework provides 3 APIs to read/write NVMEM cells. + +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); + +void nvmem_cell_put(struct nvmem_cell *cell); +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); + +void *nvmem_cell_read(struct nvmem_cell *cell, ssize_t *len); +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, ssize_t len); + +*nvmem_cell_get() apis will get a reference to nvmem cell for a given id, +and nvmem_cell_read/write() can then read or write to the cell. +Once the usage of the cell is finished the consumer should call *nvmem_cell_put() +to free all the allocation memory for the cell. + +4. Direct NVMEM device based consumer APIs +========================================== + +In some instances it is necessary to directly read/write the NVMEM. +To facilitate such consumers NVMEM framework provides below apis. 
+ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); +struct nvmem_device *devm_nvmem_device_get(struct device *dev, + const char *name); +void nvmem_device_put(struct nvmem_device *nvmem); +int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset, + size_t bytes, void *buf); +int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset, + size_t bytes, void *buf); +int nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); + +Before the consumers can read/write NVMEM directly, it should get hold +of nvmem_controller from one of the *nvmem_device_get() api. + +The difference between these apis and cell based apis is that these apis always +take nvmem_device as parameter. + +5. Releasing a reference to the NVMEM +===================================== + +When a consumers no longer needs the NVMEM, it has to release the reference +to the NVMEM it has obtained using the APIs mentioned in the above section. +The NVMEM framework provides 2 APIs to release a reference to the NVMEM. + +void nvmem_cell_put(struct nvmem_cell *cell); +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); +void nvmem_device_put(struct nvmem_device *nvmem); +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem); + +Both these APIs are used to release a reference to the NVMEM and +devm_nvmem_cell_put and devm_nvmem_device_put destroys the devres associated +with this NVMEM. + +Userspace ++++++++++ + +6. Userspace binary interface +============================== + +Userspace can read/write the raw NVMEM file located at +/sys/bus/nvmem/devices/*/nvmem + +ex: + +hexdump /sys/bus/nvmem/devices/qfprom0/nvmem + +0000000 0000 0000 0000 0000 0000 0000 0000 0000 +* +00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00 +0000000 0000 0000 0000 0000 0000 0000 0000 0000 +... +* +0001000 + +7. DeviceTree Binding +===================== + +See Documentation/devicetree/bindings/nvmem/nvmem.txt diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt index 2850df3bf957..2fc909502db5 100644 --- a/Documentation/power/suspend-and-cpuhotplug.txt +++ b/Documentation/power/suspend-and-cpuhotplug.txt @@ -72,7 +72,7 @@ More details follow: | v Disable regular cpu hotplug - by setting cpu_hotplug_disabled=1 + by increasing cpu_hotplug_disabled | v Release cpu_add_remove_lock @@ -89,7 +89,7 @@ Resuming back is likewise, with the counterparts being (in the order of execution during resume): * enable_nonboot_cpus() which involves: | Acquire cpu_add_remove_lock - | Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug + | Decrease cpu_hotplug_disabled, thereby enabling regular cpu hotplug | Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop] | Release cpu_add_remove_lock v @@ -120,7 +120,7 @@ after the entire cycle is complete (i.e., suspend + resume). Acquire cpu_add_remove_lock | v - If cpu_hotplug_disabled is 1 + If cpu_hotplug_disabled > 0 return gracefully | | diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt index 77d14d51a670..0a5c3290e732 100644 --- a/Documentation/trace/coresight.txt +++ b/Documentation/trace/coresight.txt @@ -15,7 +15,7 @@ HW assisted tracing is becoming increasingly useful when dealing with systems that have many SoCs and other components like GPU and DMA engines. 
ARM has developed a HW assisted tracing solution by means of different components, each being added to a design at synthesis time to cater to specific tracing needs. -Compoments are generally categorised as source, link and sinks and are +Components are generally categorised as source, link and sinks and are (usually) discovered using the AMBA bus. "Sources" generate a compressed stream representing the processor instruction @@ -138,7 +138,7 @@ void coresight_unregister(struct coresight_device *csdev); The registering function is taking a "struct coresight_device *csdev" and register the device with the core framework. The unregister function takes -a reference to a "strut coresight_device", obtained at registration time. +a reference to a "struct coresight_device", obtained at registration time. If everything goes well during the registration process the new devices will show up under /sys/bus/coresight/devices, as showns here for a TC2 platform: diff --git a/MAINTAINERS b/MAINTAINERS index a9ae6c105520..d954927fa241 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4957,6 +4957,7 @@ F: drivers/scsi/storvsc_drv.c F: drivers/video/fbdev/hyperv_fb.c F: include/linux/hyperv.h F: tools/hv/ +F: Documentation/ABI/stable/sysfs-bus-vmbus I2C OVER PARALLEL PORT M: Jean Delvare <jdelvare@suse.com> @@ -7288,6 +7289,15 @@ S: Supported F: drivers/block/nvme* F: include/linux/nvme.h +NVMEM FRAMEWORK +M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org> +M: Maxime Ripard <maxime.ripard@free-electrons.com> +S: Maintained +F: drivers/nvmem/ +F: Documentation/devicetree/bindings/nvmem/ +F: include/linux/nvmem-consumer.h +F: include/linux/nvmem-provider.h + NXP-NCI NFC DRIVER M: Clément Perrochaud <clement.perrochaud@effinnov.com> R: Charles Gorand <charles.gorand@effinnov.com> diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts index bd35b0674ff6..9bc72a3356e4 100644 --- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts +++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts @@ -17,3 +17,13 @@ status = "ok"; }; }; + +&spmi_bus { + pm8941@0 { + coincell@2800 { + status = "ok"; + qcom,rset-ohms = <2100>; + qcom,vset-millivolts = <3000>; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi index aa774e685018..968f1043d4f5 100644 --- a/arch/arm/boot/dts/qcom-pm8941.dtsi +++ b/arch/arm/boot/dts/qcom-pm8941.dtsi @@ -125,6 +125,12 @@ interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>; qcom,external-resistor-micro-ohms = <10000>; }; + + coincell@2800 { + compatible = "qcom,pm8941-coincell"; + reg = <0x2800>; + status = "disabled"; + }; }; usid1: pm8941@1 { diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index c163215abb9a..aaf59b7da98a 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -7,6 +7,7 @@ struct ms_hyperv_info { u32 features; + u32 misc_features; u32 hints; }; @@ -20,4 +21,8 @@ void hyperv_vector_handler(struct pt_regs *regs); void hv_setup_vmbus_irq(void (*handler)(void)); void hv_remove_vmbus_irq(void); +void hv_setup_kexec_handler(void (*handler)(void)); +void hv_remove_kexec_handler(void); +void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); +void hv_remove_crash_handler(void); #endif diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index f36d56bd7632..f0412c50c47b 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -27,6 
+27,8 @@ #define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0) /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/ #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1) +/* Partition reference TSC MSR is available */ +#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9) /* A partition's reference time stamp counter (TSC) page */ #define HV_X64_MSR_REFERENCE_TSC 0x40000021 diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index aad4bd84b475..f794bfa3c138 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -18,6 +18,7 @@ #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/kexec.h> #include <asm/processor.h> #include <asm/hypervisor.h> #include <asm/hyperv.h> @@ -28,10 +29,14 @@ #include <asm/i8259.h> #include <asm/apic.h> #include <asm/timer.h> +#include <asm/reboot.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); + #if IS_ENABLED(CONFIG_HYPERV) static void (*vmbus_handler)(void); @@ -67,8 +72,47 @@ void hv_remove_vmbus_irq(void) } EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq); EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq); + +void hv_setup_kexec_handler(void (*handler)(void)) +{ + hv_kexec_handler = handler; +} +EXPORT_SYMBOL_GPL(hv_setup_kexec_handler); + +void hv_remove_kexec_handler(void) +{ + hv_kexec_handler = NULL; +} +EXPORT_SYMBOL_GPL(hv_remove_kexec_handler); + +void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)) +{ + hv_crash_handler = handler; +} +EXPORT_SYMBOL_GPL(hv_setup_crash_handler); + +void hv_remove_crash_handler(void) +{ + hv_crash_handler = NULL; +} +EXPORT_SYMBOL_GPL(hv_remove_crash_handler); #endif +static void hv_machine_shutdown(void) +{ + if (kexec_in_progress && hv_kexec_handler) + hv_kexec_handler(); + native_machine_shutdown(); +} + +static void hv_machine_crash_shutdown(struct pt_regs *regs) +{ + if (hv_crash_handler) + hv_crash_handler(regs); + native_machine_crash_shutdown(regs); +} + + static uint32_t __init ms_hyperv_platform(void) { u32 eax; @@ -114,6 +158,7 @@ static void __init ms_hyperv_init_platform(void) * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); + ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", @@ -141,6 +186,8 @@ static void __init ms_hyperv_init_platform(void) no_timer_check = 1; #endif + machine_ops.shutdown = hv_machine_shutdown; + machine_ops.crash_shutdown = hv_machine_crash_shutdown; } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { diff --git a/drivers/Kconfig b/drivers/Kconfig index 6e973b8e3a3b..4e2e6aaf0b88 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -184,4 +184,6 @@ source "drivers/android/Kconfig" source "drivers/nvdimm/Kconfig" +source "drivers/nvmem/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index b64b49f6e01b..4c270f5414f0 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -165,3 +165,4 @@ obj-$(CONFIG_RAS) += ras/ obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ obj-$(CONFIG_ANDROID) += android/ +obj-$(CONFIG_NVMEM) += nvmem/ diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c index 5b93852392b8..816de9eaac26 100644 --- a/drivers/auxdisplay/ks0108.c +++ b/drivers/auxdisplay/ks0108.c @@ -23,6 +23,8 @@ * */ +#define pr_fmt(fmt) 
KBUILD_MODNAME ": " fmt + #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> @@ -90,17 +92,19 @@ void ks0108_displaystate(unsigned char state) void ks0108_startline(unsigned char startline) { - ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7)); + ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) | + bit(7)); } void ks0108_address(unsigned char address) { - ks0108_writedata(min(address,(unsigned char)63) | bit(6)); + ks0108_writedata(min_t(unsigned char, address, 63) | bit(6)); } void ks0108_page(unsigned char page) { - ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7)); + ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) | + bit(5) | bit(7)); } EXPORT_SYMBOL_GPL(ks0108_writedata); @@ -121,52 +125,71 @@ unsigned char ks0108_isinited(void) } EXPORT_SYMBOL_GPL(ks0108_isinited); -/* - * Module Init & Exit - */ - -static int __init ks0108_init(void) +static void ks0108_parport_attach(struct parport *port) { - int result; - int ret = -EINVAL; - - ks0108_parport = parport_find_base(ks0108_port); - if (ks0108_parport == NULL) { - printk(KERN_ERR KS0108_NAME ": ERROR: " - "parport didn't find %i port\n", ks0108_port); - goto none; - } - - ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME, - NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); - if (ks0108_pardevice == NULL) { - printk(KERN_ERR KS0108_NAME ": ERROR: " - "parport didn't register new device\n"); - goto none; + struct pardev_cb ks0108_cb; + + if (port->base != ks0108_port) + return; + + memset(&ks0108_cb, 0, sizeof(ks0108_cb)); + ks0108_cb.flags = PARPORT_DEV_EXCL; + ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME, + &ks0108_cb, 0); + if (!ks0108_pardevice) { + pr_err("ERROR: parport didn't register new device\n"); + return; } - - result = parport_claim(ks0108_pardevice); - if (result != 0) { - printk(KERN_ERR KS0108_NAME ": ERROR: " - "can't claim %i parport, maybe in use\n", ks0108_port); - ret = result; - goto registered; + if (parport_claim(ks0108_pardevice)) { + pr_err("could not claim access to parport %i. Aborting.\n", + ks0108_port); + goto err_unreg_device; } + ks0108_parport = port; ks0108_inited = 1; - return 0; + return; -registered: +err_unreg_device: parport_unregister_device(ks0108_pardevice); - -none: - return ret; + ks0108_pardevice = NULL; } -static void __exit ks0108_exit(void) +static void ks0108_parport_detach(struct parport *port) { + if (port->base != ks0108_port) + return; + + if (!ks0108_pardevice) { + pr_err("%s: already unregistered.\n", KS0108_NAME); + return; + } + parport_release(ks0108_pardevice); parport_unregister_device(ks0108_pardevice); + ks0108_pardevice = NULL; + ks0108_parport = NULL; +} + +/* + * Module Init & Exit + */ + +static struct parport_driver ks0108_parport_driver = { + .name = "ks0108", + .match_port = ks0108_parport_attach, + .detach = ks0108_parport_detach, + .devmodel = true, +}; + +static int __init ks0108_init(void) +{ + return parport_register_driver(&ks0108_parport_driver); +} + +static void __exit ks0108_exit(void) +{ + parport_unregister_driver(&ks0108_parport_driver); } module_init(ks0108_init); diff --git a/drivers/char/misc.c b/drivers/char/misc.c index fdb0f9b3fe45..8069b361b8dd 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -243,17 +243,15 @@ int misc_register(struct miscdevice * misc) * @misc: device to unregister * * Unregister a miscellaneous device that was previously - * successfully registered with misc_register(). 
Success - * is indicated by a zero return, a negative errno code - * indicates an error. + * successfully registered with misc_register(). */ -int misc_deregister(struct miscdevice *misc) +void misc_deregister(struct miscdevice *misc) { int i = DYNAMIC_MINORS - misc->minor - 1; if (WARN_ON(list_empty(&misc->list))) - return -EINVAL; + return; mutex_lock(&misc_mtx); list_del(&misc->list); @@ -261,7 +259,6 @@ int misc_deregister(struct miscdevice *misc) if (i < DYNAMIC_MINORS && i >= 0) clear_bit(i, misc_minors); mutex_unlock(&misc_mtx); - return 0; } EXPORT_SYMBOL(misc_register); @@ -281,10 +278,9 @@ static char *misc_devnode(struct device *dev, umode_t *mode) static int __init misc_init(void) { int err; + struct proc_dir_entry *ret; -#ifdef CONFIG_PROC_FS - proc_create("misc", 0, NULL, &misc_proc_fops); -#endif + ret = proc_create("misc", 0, NULL, &misc_proc_fops); misc_class = class_create(THIS_MODULE, "misc"); err = PTR_ERR(misc_class); if (IS_ERR(misc_class)) @@ -300,7 +296,8 @@ fail_printk: printk("unable to get major %d for misc devices\n", MISC_MAJOR); class_destroy(misc_class); fail_remove: - remove_proc_entry("misc", NULL); + if (ret) + remove_proc_entry("misc", NULL); return err; } subsys_initcall(misc_init); diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 9df78e2cc45d..97c2d8d433d6 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -702,7 +702,7 @@ static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq, seq_printf(seq, "%ds%s\n", nvram[10], nvram[10] < 8 ? ", no memory test" : ""); - vmode = (nvram[14] << 8) || nvram[15]; + vmode = (nvram[14] << 8) | nvram[15]; seq_printf(seq, "Video mode : %s colors, %d columns, %s %s monitor\n", colors[vmode & 7], diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index 014c9d90d297..f5a45d887a37 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c @@ -430,7 +430,7 @@ static int tosh_probe(void) int i,major,minor,day,year,month,flag; unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 }; SMMRegisters regs; - void __iomem *bios = ioremap_cache(0xf0000, 0x10000); + void __iomem *bios = ioremap(0xf0000, 0x10000); if (!bios) return -ENOMEM; diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c index d8266bc2ae35..9418300214e9 100644 --- a/drivers/char/xillybus/xillybus_pcie.c +++ b/drivers/char/xillybus/xillybus_pcie.c @@ -193,14 +193,16 @@ static int xilly_probe(struct pci_dev *pdev, } /* - * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1 - * is the right thing. But some unclever PCIe drivers report it's OK - * when the hardware drops those 64-bit PCIe packets. So trust - * nobody and use 32 bits DMA addressing in any case. + * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets, + * even when the PCIe driver claims that a 64-bit mask is OK. On the + * other hand, on some architectures, 64-bit addressing is mandatory. + * So go for the 64-bit mask only when failing is the other option. */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { endpoint->dma_using_dac = 0; + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + endpoint->dma_using_dac = 1; } else { dev_err(endpoint->dev, "Failed to set DMA mask. 
Aborting.\n"); return -ENODEV; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 603ce97e9027..c4dcab048cb8 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -601,6 +601,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, u64 aligned_data = 0; int ret; bool signal = false; + int num_vecs = ((bufferlen != 0) ? 3 : 1); /* Setup the descriptor */ @@ -618,7 +619,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_len = (packetlen_aligned - packetlen); - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); + ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, + &signal); /* * Signalling the host is conditional on many factors: diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 4506a6623618..2f9aead4ecfc 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -347,6 +347,7 @@ enum { IDE = 0, SCSI, NIC, + ND_NIC, MAX_PERF_CHN, }; @@ -391,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui struct vmbus_channel *primary = channel->primary_channel; int next_node; struct cpumask available_mask; + struct cpumask *alloced_mask; for (i = IDE; i < MAX_PERF_CHN; i++) { if (!memcmp(type_guid->b, hp_devs[i].guid, @@ -408,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui * channel, bind it to cpu 0. */ channel->numa_node = 0; - cpumask_set_cpu(0, &channel->alloced_cpus_in_node); channel->target_cpu = 0; channel->target_vp = hv_context.vp_index[0]; return; @@ -433,21 +434,38 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui channel->numa_node = next_node; primary = channel; } + alloced_mask = &hv_context.hv_numa_map[primary->numa_node]; - if (cpumask_weight(&primary->alloced_cpus_in_node) == + if (cpumask_weight(alloced_mask) == cpumask_weight(cpumask_of_node(primary->numa_node))) { /* * We have cycled through all the CPUs in the node; * reset the alloced map. 
*/ - cpumask_clear(&primary->alloced_cpus_in_node); + cpumask_clear(alloced_mask); } - cpumask_xor(&available_mask, &primary->alloced_cpus_in_node, + cpumask_xor(&available_mask, alloced_mask, cpumask_of_node(primary->numa_node)); - cur_cpu = cpumask_next(-1, &available_mask); - cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node); + cur_cpu = -1; + while (true) { + cur_cpu = cpumask_next(cur_cpu, &available_mask); + if (cur_cpu >= nr_cpu_ids) { + cur_cpu = -1; + cpumask_copy(&available_mask, + cpumask_of_node(primary->numa_node)); + continue; + } + + if (!cpumask_test_cpu(cur_cpu, + &primary->alloced_cpus_in_node)) { + cpumask_set_cpu(cur_cpu, + &primary->alloced_cpus_in_node); + cpumask_set_cpu(cur_cpu, alloced_mask); + break; + } + } channel->target_cpu = cur_cpu; channel->target_vp = hv_context.vp_index[cur_cpu]; @@ -469,6 +487,10 @@ void vmbus_initiate_unload(void) { struct vmbus_channel_message_header hdr; + /* Pre-Win2012R2 hosts don't support reconnect */ + if (vmbus_proto_version < VERSION_WIN8_1) + return; + init_completion(&vmbus_connection.unload_event); memset(&hdr, 0, sizeof(struct vmbus_channel_message_header)); hdr.msgtype = CHANNELMSG_UNLOAD; diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index d3943bceecc3..6341be8739ae 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -93,11 +93,14 @@ static int query_hypervisor_info(void) */ static u64 do_hypercall(u64 control, void *input, void *output) { -#ifdef CONFIG_X86_64 - u64 hv_status = 0; u64 input_address = (input) ? virt_to_phys(input) : 0; u64 output_address = (output) ? virt_to_phys(output) : 0; void *hypercall_page = hv_context.hypercall_page; +#ifdef CONFIG_X86_64 + u64 hv_status = 0; + + if (!hypercall_page) + return (u64)ULLONG_MAX; __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8"); __asm__ __volatile__("call *%3" : "=a" (hv_status) : @@ -112,13 +115,13 @@ static u64 do_hypercall(u64 control, void *input, void *output) u32 control_lo = control & 0xFFFFFFFF; u32 hv_status_hi = 1; u32 hv_status_lo = 1; - u64 input_address = (input) ? virt_to_phys(input) : 0; u32 input_address_hi = input_address >> 32; u32 input_address_lo = input_address & 0xFFFFFFFF; - u64 output_address = (output) ? virt_to_phys(output) : 0; u32 output_address_hi = output_address >> 32; u32 output_address_lo = output_address & 0xFFFFFFFF; - void *hypercall_page = hv_context.hypercall_page; + + if (!hypercall_page) + return (u64)ULLONG_MAX; __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi), "=a"(hv_status_lo) : "d" (control_hi), @@ -130,6 +133,56 @@ static u64 do_hypercall(u64 control, void *input, void *output) #endif /* !x86_64 */ } +#ifdef CONFIG_X86_64 +static cycle_t read_hv_clock_tsc(struct clocksource *arg) +{ + cycle_t current_tick; + struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page; + + if (tsc_pg->tsc_sequence != -1) { + /* + * Use the tsc page to compute the value. + */ + + while (1) { + cycle_t tmp; + u32 sequence = tsc_pg->tsc_sequence; + u64 cur_tsc; + u64 scale = tsc_pg->tsc_scale; + s64 offset = tsc_pg->tsc_offset; + + rdtscll(cur_tsc); + /* current_tick = ((cur_tsc *scale) >> 64) + offset */ + asm("mulq %3" + : "=d" (current_tick), "=a" (tmp) + : "a" (cur_tsc), "r" (scale)); + + current_tick += offset; + if (tsc_pg->tsc_sequence == sequence) + return current_tick; + + if (tsc_pg->tsc_sequence != -1) + continue; + /* + * Fallback using MSR method. 
+ */ + break; + } + } + rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick); + return current_tick; +} + +static struct clocksource hyperv_cs_tsc = { + .name = "hyperv_clocksource_tsc_page", + .rating = 425, + .read = read_hv_clock_tsc, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; +#endif + + /* * hv_init - Main initialization routine. * @@ -139,7 +192,9 @@ int hv_init(void) { int max_leaf; union hv_x64_msr_hypercall_contents hypercall_msr; + union hv_x64_msr_hypercall_contents tsc_msr; void *virtaddr = NULL; + void *va_tsc = NULL; memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); memset(hv_context.synic_message_page, 0, @@ -183,6 +238,22 @@ int hv_init(void) hv_context.hypercall_page = virtaddr; +#ifdef CONFIG_X86_64 + if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { + va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL); + if (!va_tsc) + goto cleanup; + hv_context.tsc_page = va_tsc; + + rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64); + + tsc_msr.enable = 1; + tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc); + + wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64); + clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); + } +#endif return 0; cleanup: @@ -216,6 +287,21 @@ void hv_cleanup(void) vfree(hv_context.hypercall_page); hv_context.hypercall_page = NULL; } + +#ifdef CONFIG_X86_64 + /* + * Cleanup the TSC page based CS. + */ + if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { + clocksource_change_rating(&hyperv_cs_tsc, 10); + clocksource_unregister(&hyperv_cs_tsc); + + hypercall_msr.as_uint64 = 0; + wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); + vfree(hv_context.tsc_page); + hv_context.tsc_page = NULL; + } +#endif } /* @@ -271,7 +357,7 @@ static int hv_ce_set_next_event(unsigned long delta, { cycle_t current_tick; - WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT); + WARN_ON(!clockevent_state_oneshot(evt)); rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick); current_tick += delta; @@ -279,31 +365,24 @@ static int hv_ce_set_next_event(unsigned long delta, return 0; } -static void hv_ce_setmode(enum clock_event_mode mode, - struct clock_event_device *evt) +static int hv_ce_shutdown(struct clock_event_device *evt) +{ + wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0); + wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0); + + return 0; +} + +static int hv_ce_set_oneshot(struct clock_event_device *evt) { union hv_timer_config timer_cfg; - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* unsupported */ - break; - - case CLOCK_EVT_MODE_ONESHOT: - timer_cfg.enable = 1; - timer_cfg.auto_enable = 1; - timer_cfg.sintx = VMBUS_MESSAGE_SINT; - wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64); - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0); - wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0); - break; - case CLOCK_EVT_MODE_RESUME: - break; - } + timer_cfg.enable = 1; + timer_cfg.auto_enable = 1; + timer_cfg.sintx = VMBUS_MESSAGE_SINT; + wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64); + + return 0; } static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) @@ -318,7 +397,8 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) * references to the hv_vmbus module making it impossible to unload. 
*/ - dev->set_mode = hv_ce_setmode; + dev->set_state_shutdown = hv_ce_shutdown; + dev->set_state_oneshot = hv_ce_set_oneshot; dev->set_next_event = hv_ce_set_next_event; } @@ -329,6 +409,13 @@ int hv_synic_alloc(void) size_t ced_size = sizeof(struct clock_event_device); int cpu; + hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids, + GFP_ATOMIC); + if (hv_context.hv_numa_map == NULL) { + pr_err("Unable to allocate NUMA map\n"); + goto err; + } + for_each_online_cpu(cpu) { hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); if (hv_context.event_dpc[cpu] == NULL) { @@ -342,6 +429,7 @@ int hv_synic_alloc(void) pr_err("Unable to allocate clock event device\n"); goto err; } + hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); hv_context.synic_message_page[cpu] = @@ -390,6 +478,7 @@ void hv_synic_free(void) { int cpu; + kfree(hv_context.hv_numa_map); for_each_online_cpu(cpu) hv_synic_free_cpu(cpu); } @@ -503,8 +592,7 @@ void hv_synic_cleanup(void *arg) /* Turn off clockevent device */ if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) - hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN, - hv_context.clk_evt[cpu]); + hv_ce_shutdown(hv_context.clk_evt[cpu]); rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); @@ -530,6 +618,4 @@ void hv_synic_cleanup(void *arg) rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); sctrl.enable = 0; wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); - - hv_synic_free_cpu(cpu); } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 8a725cd69ad7..b853b4b083bd 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -62,11 +62,13 @@ enum { DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3), DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0), + DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0), DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1, DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2, + DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3, - DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8 + DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10 }; @@ -1296,13 +1298,25 @@ static void version_resp(struct hv_dynmem_device *dm, if (dm->next_version == 0) goto version_error; - dm->next_version = 0; memset(&version_req, 0, sizeof(struct dm_version_request)); version_req.hdr.type = DM_VERSION_REQUEST; version_req.hdr.size = sizeof(struct dm_version_request); version_req.hdr.trans_id = atomic_inc_return(&trans_id); - version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7; - version_req.is_last_attempt = 1; + version_req.version.version = dm->next_version; + + /* + * Set the next version to try in case current version fails. + * Win7 protocol ought to be the last one to try. 
+ */ + switch (version_req.version.version) { + case DYNMEM_PROTOCOL_VERSION_WIN8: + dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7; + version_req.is_last_attempt = 0; + break; + default: + dm->next_version = 0; + version_req.is_last_attempt = 1; + } ret = vmbus_sendpacket(dm->dev->channel, &version_req, sizeof(struct dm_version_request), @@ -1442,7 +1456,7 @@ static int balloon_probe(struct hv_device *dev, dm_device.dev = dev; dm_device.state = DM_INITIALIZING; - dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7; + dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8; init_completion(&dm_device.host_event); init_completion(&dm_device.config_event); INIT_LIST_HEAD(&dm_device.ha_region_list); @@ -1474,7 +1488,7 @@ static int balloon_probe(struct hv_device *dev, version_req.hdr.type = DM_VERSION_REQUEST; version_req.hdr.size = sizeof(struct dm_version_request); version_req.hdr.trans_id = atomic_inc_return(&trans_id); - version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8; + version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10; version_req.is_last_attempt = 0; ret = vmbus_sendpacket(dev->channel, &version_req, diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index b50dd330cf31..db4b887b889d 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -116,7 +116,7 @@ static int fcopy_handle_handshake(u32 version) static void fcopy_send_data(struct work_struct *dummy) { - struct hv_start_fcopy smsg_out; + struct hv_start_fcopy *smsg_out = NULL; int operation = fcopy_transaction.fcopy_msg->operation; struct hv_start_fcopy *smsg_in; void *out_src; @@ -136,21 +136,24 @@ static void fcopy_send_data(struct work_struct *dummy) switch (operation) { case START_FILE_COPY: out_len = sizeof(struct hv_start_fcopy); - memset(&smsg_out, 0, out_len); - smsg_out.hdr.operation = operation; + smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL); + if (!smsg_out) + return; + + smsg_out->hdr.operation = operation; smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg; utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH, UTF16_LITTLE_ENDIAN, - (__u8 *)&smsg_out.file_name, W_MAX_PATH - 1); + (__u8 *)&smsg_out->file_name, W_MAX_PATH - 1); utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH, UTF16_LITTLE_ENDIAN, - (__u8 *)&smsg_out.path_name, W_MAX_PATH - 1); + (__u8 *)&smsg_out->path_name, W_MAX_PATH - 1); - smsg_out.copy_flags = smsg_in->copy_flags; - smsg_out.file_size = smsg_in->file_size; - out_src = &smsg_out; + smsg_out->copy_flags = smsg_in->copy_flags; + smsg_out->file_size = smsg_in->file_size; + out_src = smsg_out; break; default: @@ -168,6 +171,8 @@ static void fcopy_send_data(struct work_struct *dummy) fcopy_transaction.state = HVUTIL_READY; } } + kfree(smsg_out); + return; } diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index d85798d5992c..74c38a9f34a6 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -353,6 +353,9 @@ kvp_send_key(struct work_struct *dummy) return; message = kzalloc(sizeof(*message), GFP_KERNEL); + if (!message) + return; + message->kvp_hdr.operation = operation; message->kvp_hdr.pool = pool; in_msg = kvp_transaction.kvp_msg; diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index ea7ba5ef16a9..6a9d80a5332d 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -186,7 +186,7 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len) return -EINVAL; } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) { cn_msg = kzalloc(sizeof(*cn_msg) + 
len, GFP_ATOMIC); - if (!msg) + if (!cn_msg) return -ENOMEM; cn_msg->id.idx = hvt->cn_id.idx; cn_msg->id.val = hvt->cn_id.val; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index cddc0c9f6bf9..3d70e36c918e 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -141,7 +141,7 @@ struct hv_port_info { struct { u32 target_sint; u32 target_vp; - u16 base_flag_bumber; + u16 base_flag_number; u16 flag_count; u32 rsvdz; } event_port_info; @@ -517,6 +517,7 @@ struct hv_context { u64 guestid; void *hypercall_page; + void *tsc_page; bool synic_initialized; @@ -551,10 +552,23 @@ struct hv_context { * Support PV clockevent device. */ struct clock_event_device *clk_evt[NR_CPUS]; + /* + * To manage allocations in a NUMA node. + * Array indexed by numa node ID. + */ + struct cpumask *hv_numa_map; }; extern struct hv_context hv_context; +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; + u64 reserved2[509]; +}; + struct hv_ring_buffer_debug_info { u32 current_interrupt_mask; u32 current_read_index; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 6361d124f67d..70a1a9a22f87 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -103,10 +103,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi) * there is room for the producer to send the pending packet. */ -static bool hv_need_to_signal_on_read(u32 old_rd, - struct hv_ring_buffer_info *rbi) +static bool hv_need_to_signal_on_read(u32 prev_write_sz, + struct hv_ring_buffer_info *rbi) { - u32 prev_write_sz; u32 cur_write_sz; u32 r_size; u32 write_loc = rbi->ring_buffer->write_index; @@ -123,10 +122,6 @@ static bool hv_need_to_signal_on_read(u32 old_rd, cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) : read_loc - write_loc; - prev_write_sz = write_loc >= old_rd ? 
r_size - (write_loc - old_rd) : - old_rd - write_loc; - - if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz)) return true; @@ -517,7 +512,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, u32 next_read_location = 0; u64 prev_indices = 0; unsigned long flags; - u32 old_read; if (buflen <= 0) return -EINVAL; @@ -528,8 +522,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, &bytes_avail_toread, &bytes_avail_towrite); - old_read = bytes_avail_toread; - /* Make sure there is something to read */ if (bytes_avail_toread < buflen) { spin_unlock_irqrestore(&inring_info->ring_lock, flags); @@ -560,7 +552,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, spin_unlock_irqrestore(&inring_info->ring_lock, flags); - *signal = hv_need_to_signal_on_read(old_read, inring_info); + *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); return 0; } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index cf204005ee78..f19b6f7a467a 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -39,6 +39,8 @@ #include <asm/mshyperv.h> #include <linux/notifier.h> #include <linux/ptrace.h> +#include <linux/screen_info.h> +#include <linux/kdebug.h> #include "hyperv_vmbus.h" static struct acpi_device *hv_acpi_dev; @@ -48,12 +50,18 @@ static struct completion probe_event; static int irq; -static int hyperv_panic_event(struct notifier_block *nb, - unsigned long event, void *ptr) +static void hyperv_report_panic(struct pt_regs *regs) { - struct pt_regs *regs; + static bool panic_reported; - regs = current_pt_regs(); + /* + * We prefer to report panic on 'die' chain as we have proper + * registers to report, but if we miss it (e.g. on BUG()) we need + * to report it on 'panic'. 
+ */ + if (panic_reported) + return; + panic_reported = true; wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip); wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax); @@ -65,18 +73,37 @@ static int hyperv_panic_event(struct notifier_block *nb, * Let Hyper-V know there is crash data available */ wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY); +} + +static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, + void *args) +{ + struct pt_regs *regs; + + regs = current_pt_regs(); + + hyperv_report_panic(regs); return NOTIFY_DONE; } +static int hyperv_die_event(struct notifier_block *nb, unsigned long val, + void *args) +{ + struct die_args *die = (struct die_args *)args; + struct pt_regs *regs = die->regs; + + hyperv_report_panic(regs); + return NOTIFY_DONE; +} + +static struct notifier_block hyperv_die_block = { + .notifier_call = hyperv_die_event, +}; static struct notifier_block hyperv_panic_block = { .notifier_call = hyperv_panic_event, }; -struct resource hyperv_mmio = { - .name = "hyperv mmio", - .flags = IORESOURCE_MEM, -}; -EXPORT_SYMBOL_GPL(hyperv_mmio); +struct resource *hyperv_mmio; static int vmbus_exists(void) { @@ -414,6 +441,43 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, } static DEVICE_ATTR_RO(in_write_bytes_avail); +static ssize_t channel_vp_mapping_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct hv_device *hv_dev = device_to_hv_device(dev); + struct vmbus_channel *channel = hv_dev->channel, *cur_sc; + unsigned long flags; + int buf_size = PAGE_SIZE, n_written, tot_written; + struct list_head *cur; + + if (!channel) + return -ENODEV; + + tot_written = snprintf(buf, buf_size, "%u:%u\n", + channel->offermsg.child_relid, channel->target_cpu); + + spin_lock_irqsave(&channel->lock, flags); + + list_for_each(cur, &channel->sc_list) { + if (tot_written >= buf_size - 1) + break; + + cur_sc = list_entry(cur, struct vmbus_channel, sc_list); + n_written = scnprintf(buf + tot_written, + buf_size - tot_written, + "%u:%u\n", + cur_sc->offermsg.child_relid, + cur_sc->target_cpu); + tot_written += n_written; + } + + spin_unlock_irqrestore(&channel->lock, flags); + + return tot_written; +} +static DEVICE_ATTR_RO(channel_vp_mapping); + /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ static struct attribute *vmbus_attrs[] = { &dev_attr_id.attr, @@ -438,6 +502,7 @@ static struct attribute *vmbus_attrs[] = { &dev_attr_in_write_index.attr, &dev_attr_in_read_bytes_avail.attr, &dev_attr_in_write_bytes_avail.attr, + &dev_attr_channel_vp_mapping.attr, NULL, }; ATTRIBUTE_GROUPS(vmbus); @@ -763,38 +828,6 @@ static void vmbus_isr(void) } } -#ifdef CONFIG_HOTPLUG_CPU -static int hyperv_cpu_disable(void) -{ - return -ENOSYS; -} - -static void hv_cpu_hotplug_quirk(bool vmbus_loaded) -{ - static void *previous_cpu_disable; - - /* - * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8, - * ...) is not supported at this moment as channel interrupts are - * distributed across all of them. - */ - - if ((vmbus_proto_version == VERSION_WS2008) || - (vmbus_proto_version == VERSION_WIN7)) - return; - - if (vmbus_loaded) { - previous_cpu_disable = smp_ops.cpu_disable; - smp_ops.cpu_disable = hyperv_cpu_disable; - pr_notice("CPU offlining is not supported by hypervisor\n"); - } else if (previous_cpu_disable) - smp_ops.cpu_disable = previous_cpu_disable; -} -#else -static void hv_cpu_hotplug_quirk(bool vmbus_loaded) -{ -} -#endif /* * vmbus_bus_init -Main vmbus driver initialization routine. 
@@ -836,12 +869,14 @@ static int vmbus_bus_init(int irq) if (ret) goto err_alloc; - hv_cpu_hotplug_quirk(true); + if (vmbus_proto_version > VERSION_WIN7) + cpu_hotplug_disable(); /* * Only register if the crash MSRs are available */ - if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { + register_die_notifier(&hyperv_die_block); atomic_notifier_chain_register(&panic_notifier_list, &hyperv_panic_block); } @@ -863,8 +898,8 @@ err_cleanup: } /** - * __vmbus_child_driver_register - Register a vmbus's driver - * @drv: Pointer to driver structure you want to register + * __vmbus_child_driver_register() - Register a vmbus's driver + * @hv_driver: Pointer to driver structure you want to register * @owner: owner module of the drv * @mod_name: module name string * @@ -896,7 +931,8 @@ EXPORT_SYMBOL_GPL(__vmbus_driver_register); /** * vmbus_driver_unregister() - Unregister a vmbus's driver - * @drv: Pointer to driver structure you want to un-register + * @hv_driver: Pointer to driver structure you want to + * un-register * * Un-register the given driver that was previous registered with a call to * vmbus_driver_register() @@ -982,30 +1018,184 @@ void vmbus_device_unregister(struct hv_device *device_obj) /* - * VMBUS is an acpi enumerated device. Get the the information we + * VMBUS is an acpi enumerated device. Get the information we * need from DSDT. */ - +#define VTPM_BASE_ADDRESS 0xfed40000 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx) { + resource_size_t start = 0; + resource_size_t end = 0; + struct resource *new_res; + struct resource **old_res = &hyperv_mmio; + struct resource **prev_res = NULL; + switch (res->type) { case ACPI_RESOURCE_TYPE_IRQ: irq = res->data.irq.interrupts[0]; + return AE_OK; + + /* + * "Address" descriptors are for bus windows. Ignore + * "memory" descriptors, which are for registers on + * devices. + */ + case ACPI_RESOURCE_TYPE_ADDRESS32: + start = res->data.address32.address.minimum; + end = res->data.address32.address.maximum; break; case ACPI_RESOURCE_TYPE_ADDRESS64: - hyperv_mmio.start = res->data.address64.address.minimum; - hyperv_mmio.end = res->data.address64.address.maximum; + start = res->data.address64.address.minimum; + end = res->data.address64.address.maximum; break; + + default: + /* Unused resource type */ + return AE_OK; + } + /* + * Ignore ranges that are below 1MB, as they're not + * necessary or useful here. + */ + if (end < 0x100000) + return AE_OK; + + new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC); + if (!new_res) + return AE_NO_MEMORY; + + /* If this range overlaps the virtual TPM, truncate it. 
*/ + if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS) + end = VTPM_BASE_ADDRESS; + + new_res->name = "hyperv mmio"; + new_res->flags = IORESOURCE_MEM; + new_res->start = start; + new_res->end = end; + + do { + if (!*old_res) { + *old_res = new_res; + break; + } + + if ((*old_res)->end < new_res->start) { + new_res->sibling = *old_res; + if (prev_res) + (*prev_res)->sibling = new_res; + *old_res = new_res; + break; + } + + prev_res = old_res; + old_res = &(*old_res)->sibling; + + } while (1); return AE_OK; } +static int vmbus_acpi_remove(struct acpi_device *device) +{ + struct resource *cur_res; + struct resource *next_res; + + if (hyperv_mmio) { + for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) { + next_res = cur_res->sibling; + kfree(cur_res); + } + } + + return 0; +} + +/** + * vmbus_allocate_mmio() - Pick a memory-mapped I/O range. + * @new: If successful, supplied a pointer to the + * allocated MMIO space. + * @device_obj: Identifies the caller + * @min: Minimum guest physical address of the + * allocation + * @max: Maximum guest physical address + * @size: Size of the range to be allocated + * @align: Alignment of the range to be allocated + * @fb_overlap_ok: Whether this allocation can be allowed + * to overlap the video frame buffer. + * + * This function walks the resources granted to VMBus by the + * _CRS object in the ACPI namespace underneath the parent + * "bridge" whether that's a root PCI bus in the Generation 1 + * case or a Module Device in the Generation 2 case. It then + * attempts to allocate from the global MMIO pool in a way that + * matches the constraints supplied in these parameters and by + * that _CRS. + * + * Return: 0 on success, -errno on failure + */ +int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, + resource_size_t min, resource_size_t max, + resource_size_t size, resource_size_t align, + bool fb_overlap_ok) +{ + struct resource *iter; + resource_size_t range_min, range_max, start, local_min, local_max; + const char *dev_n = dev_name(&device_obj->device); + u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1); + int i; + + for (iter = hyperv_mmio; iter; iter = iter->sibling) { + if ((iter->start >= max) || (iter->end <= min)) + continue; + + range_min = iter->start; + range_max = iter->end; + + /* If this range overlaps the frame buffer, split it into + two tries. */ + for (i = 0; i < 2; i++) { + local_min = range_min; + local_max = range_max; + if (fb_overlap_ok || (range_min >= fb_end) || + (range_max <= screen_info.lfb_base)) { + i++; + } else { + if ((range_min <= screen_info.lfb_base) && + (range_max >= screen_info.lfb_base)) { + /* + * The frame buffer is in this window, + * so trim this into the part that + * preceeds the frame buffer. + */ + local_max = screen_info.lfb_base - 1; + range_min = fb_end; + } else { + range_min = fb_end; + continue; + } + } + + start = (local_min + align - 1) & ~(align - 1); + for (; start + size - 1 <= local_max; start += align) { + *new = request_mem_region_exclusive(start, size, + dev_n); + if (*new) + return 0; + } + } + } + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(vmbus_allocate_mmio); + static int vmbus_acpi_add(struct acpi_device *device) { acpi_status result; int ret_val = -ENODEV; + struct acpi_device *ancestor; hv_acpi_dev = device; @@ -1015,35 +1205,27 @@ static int vmbus_acpi_add(struct acpi_device *device) if (ACPI_FAILURE(result)) goto acpi_walk_err; /* - * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that - * has the mmio ranges. 
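vmbus_allocate_mmio() above is intended for VMBus child drivers that need a window of guest physical address space (a synthetic frame buffer, for instance). The fragment below is a hedged sketch of such a caller, assuming the prototype is made visible to child drivers; the helper names, MY_MMIO_SIZE and the surrounding driver are invented, and only the vmbus_allocate_mmio() signature comes from this hunk.

/*
 * Illustrative only: a helper a vmbus child driver might call from its
 * probe path to obtain MMIO space from the ranges parsed above.
 */
#define MY_MMIO_SIZE	(2 * 1024 * 1024)	/* 2 MiB window, example value */

static struct resource *my_mmio;

static int example_setup_mmio(struct hv_device *hdev)
{
	int ret;

	/*
	 * Accept any range from 1 MiB upward, page aligned, and do not
	 * overlap the firmware frame buffer.
	 */
	ret = vmbus_allocate_mmio(&my_mmio, hdev, 0x100000ULL, -1ULL,
				  MY_MMIO_SIZE, PAGE_SIZE, false);
	if (ret) {
		dev_err(&hdev->device, "no MMIO space available (%d)\n", ret);
		return ret;
	}

	dev_info(&hdev->device, "using MMIO %pR\n", my_mmio);
	return 0;
}

static void example_release_mmio(void)
{
	/* The range was claimed with request_mem_region_exclusive(). */
	if (my_mmio)
		release_mem_region(my_mmio->start, resource_size(my_mmio));
}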
Get that. + * Some ancestor of the vmbus acpi device (Gen1 or Gen2 + * firmware) is the VMOD that has the mmio ranges. Get that. */ - if (device->parent) { - result = acpi_walk_resources(device->parent->handle, - METHOD_NAME__CRS, - vmbus_walk_resources, NULL); + for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) { + result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS, + vmbus_walk_resources, NULL); if (ACPI_FAILURE(result)) - goto acpi_walk_err; - if (hyperv_mmio.start && hyperv_mmio.end) - request_resource(&iomem_resource, &hyperv_mmio); + continue; + if (hyperv_mmio) + break; } ret_val = 0; acpi_walk_err: complete(&probe_event); + if (ret_val) + vmbus_acpi_remove(device); return ret_val; } -static int vmbus_acpi_remove(struct acpi_device *device) -{ - int ret = 0; - - if (hyperv_mmio.start && hyperv_mmio.end) - ret = release_resource(&hyperv_mmio); - return ret; -} - static const struct acpi_device_id vmbus_acpi_device_ids[] = { {"VMBUS", 0}, {"VMBus", 0}, @@ -1060,6 +1242,29 @@ static struct acpi_driver vmbus_acpi_driver = { }, }; +static void hv_kexec_handler(void) +{ + int cpu; + + hv_synic_clockevents_cleanup(); + vmbus_initiate_unload(); + for_each_online_cpu(cpu) + smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); + hv_cleanup(); +}; + +static void hv_crash_handler(struct pt_regs *regs) +{ + vmbus_initiate_unload(); + /* + * In crash handler we can't schedule synic cleanup for all CPUs, + * doing the cleanup for current CPU only. This should be sufficient + * for kdump. + */ + hv_synic_cleanup(NULL); + hv_cleanup(); +}; + static int __init hv_acpi_init(void) { int ret, t; @@ -1092,6 +1297,9 @@ static int __init hv_acpi_init(void) if (ret) goto cleanup; + hv_setup_kexec_handler(hv_kexec_handler); + hv_setup_crash_handler(hv_crash_handler); + return 0; cleanup: @@ -1104,13 +1312,16 @@ static void __exit vmbus_exit(void) { int cpu; + hv_remove_kexec_handler(); + hv_remove_crash_handler(); vmbus_connection.conn_state = DISCONNECTED; hv_synic_clockevents_cleanup(); vmbus_disconnect(); hv_remove_vmbus_irq(); tasklet_kill(&msg_dpc); vmbus_free_channels(); - if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { + if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { + unregister_die_notifier(&hyperv_die_block); atomic_notifier_chain_unregister(&panic_notifier_list, &hyperv_panic_block); } @@ -1120,8 +1331,10 @@ static void __exit vmbus_exit(void) tasklet_kill(hv_context.event_dpc[cpu]); smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); } + hv_synic_free(); acpi_bus_unregister_driver(&vmbus_acpi_driver); - hv_cpu_hotplug_quirk(false); + if (vmbus_proto_version > VERSION_WIN7) + cpu_hotplug_enable(); } diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h index 098ffbec0a44..b4481eb29304 100644 --- a/drivers/hwtracing/coresight/coresight-etm.h +++ b/drivers/hwtracing/coresight/coresight-etm.h @@ -183,7 +183,9 @@ * @seq_13_event: event causing the transition from 1 to 3. * @seq_curr_state: current value of the sequencer register. * @ctxid_idx: index for the context ID registers. - * @ctxid_val: value for the context ID to trigger on. + * @ctxid_pid: value for the context ID to trigger on. + * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise + * the same value of ctxid_pid. * @ctxid_mask: mask applicable to all the context IDs. * @sync_freq: Synchronisation frequency. 
* @timestamp_event: Defines an event that requests the insertion @@ -235,7 +237,8 @@ struct etm_drvdata { u32 seq_13_event; u32 seq_curr_state; u8 ctxid_idx; - u32 ctxid_val[ETM_MAX_CTXID_CMP]; + u32 ctxid_pid[ETM_MAX_CTXID_CMP]; + u32 ctxid_vpid[ETM_MAX_CTXID_CMP]; u32 ctxid_mask; u32 sync_freq; u32 timestamp_event; diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index 018a00fda611..bf2476ed9356 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c @@ -237,8 +237,11 @@ static void etm_set_default(struct etm_drvdata *drvdata) drvdata->seq_curr_state = 0x0; drvdata->ctxid_idx = 0x0; - for (i = 0; i < drvdata->nr_ctxid_cmp; i++) - drvdata->ctxid_val[i] = 0x0; + for (i = 0; i < drvdata->nr_ctxid_cmp; i++) { + drvdata->ctxid_pid[i] = 0x0; + drvdata->ctxid_vpid[i] = 0x0; + } + drvdata->ctxid_mask = 0x0; } @@ -289,7 +292,7 @@ static void etm_enable_hw(void *info) for (i = 0; i < drvdata->nr_ext_out; i++) etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); for (i = 0; i < drvdata->nr_ctxid_cmp; i++) - etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i)); + etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i)); etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR); etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR); /* No external input selected */ @@ -1386,38 +1389,41 @@ static ssize_t ctxid_idx_store(struct device *dev, } static DEVICE_ATTR_RW(ctxid_idx); -static ssize_t ctxid_val_show(struct device *dev, +static ssize_t ctxid_pid_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long val; struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); spin_lock(&drvdata->spinlock); - val = drvdata->ctxid_val[drvdata->ctxid_idx]; + val = drvdata->ctxid_vpid[drvdata->ctxid_idx]; spin_unlock(&drvdata->spinlock); return sprintf(buf, "%#lx\n", val); } -static ssize_t ctxid_val_store(struct device *dev, +static ssize_t ctxid_pid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; - unsigned long val; + unsigned long vpid, pid; struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); - ret = kstrtoul(buf, 16, &val); + ret = kstrtoul(buf, 16, &vpid); if (ret) return ret; + pid = coresight_vpid_to_pid(vpid); + spin_lock(&drvdata->spinlock); - drvdata->ctxid_val[drvdata->ctxid_idx] = val; + drvdata->ctxid_pid[drvdata->ctxid_idx] = pid; + drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid; spin_unlock(&drvdata->spinlock); return size; } -static DEVICE_ATTR_RW(ctxid_val); +static DEVICE_ATTR_RW(ctxid_pid); static ssize_t ctxid_mask_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1609,7 +1615,7 @@ static struct attribute *coresight_etm_attrs[] = { &dev_attr_seq_13_event.attr, &dev_attr_seq_curr_state.attr, &dev_attr_ctxid_idx.attr, - &dev_attr_ctxid_val.attr, + &dev_attr_ctxid_pid.attr, &dev_attr_ctxid_mask.attr, &dev_attr_sync_freq.attr, &dev_attr_timestamp_event.attr, @@ -1912,6 +1918,11 @@ static struct amba_id etm_ids[] = { .mask = 0x0003ffff, .data = "PTM 1.1", }, + { /* PTM 1.1 Qualcomm */ + .id = 0x0003006f, + .mask = 0x0003ffff, + .data = "PTM 1.1", + }, { 0, 0}, }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 1312e993c501..254a81a4e6f4 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -155,7 +155,7 @@ static void etm4_enable_hw(void *info) 
drvdata->base + TRCACATRn(i)); } for (i = 0; i < drvdata->numcidc; i++) - writeq_relaxed(drvdata->ctxid_val[i], + writeq_relaxed(drvdata->ctxid_pid[i], drvdata->base + TRCCIDCVRn(i)); writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); @@ -506,8 +506,11 @@ static ssize_t reset_store(struct device *dev, } drvdata->ctxid_idx = 0x0; - for (i = 0; i < drvdata->numcidc; i++) - drvdata->ctxid_val[i] = 0x0; + for (i = 0; i < drvdata->numcidc; i++) { + drvdata->ctxid_pid[i] = 0x0; + drvdata->ctxid_vpid[i] = 0x0; + } + drvdata->ctxid_mask0 = 0x0; drvdata->ctxid_mask1 = 0x0; @@ -1815,7 +1818,7 @@ static ssize_t ctxid_idx_store(struct device *dev, } static DEVICE_ATTR_RW(ctxid_idx); -static ssize_t ctxid_val_show(struct device *dev, +static ssize_t ctxid_pid_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1825,17 +1828,17 @@ static ssize_t ctxid_val_show(struct device *dev, spin_lock(&drvdata->spinlock); idx = drvdata->ctxid_idx; - val = (unsigned long)drvdata->ctxid_val[idx]; + val = (unsigned long)drvdata->ctxid_vpid[idx]; spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } -static ssize_t ctxid_val_store(struct device *dev, +static ssize_t ctxid_pid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { u8 idx; - unsigned long val; + unsigned long vpid, pid; struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); /* @@ -1845,16 +1848,19 @@ static ssize_t ctxid_val_store(struct device *dev, */ if (!drvdata->ctxid_size || !drvdata->numcidc) return -EINVAL; - if (kstrtoul(buf, 16, &val)) + if (kstrtoul(buf, 16, &vpid)) return -EINVAL; + pid = coresight_vpid_to_pid(vpid); + spin_lock(&drvdata->spinlock); idx = drvdata->ctxid_idx; - drvdata->ctxid_val[idx] = (u64)val; + drvdata->ctxid_pid[idx] = (u64)pid; + drvdata->ctxid_vpid[idx] = (u64)vpid; spin_unlock(&drvdata->spinlock); return size; } -static DEVICE_ATTR_RW(ctxid_val); +static DEVICE_ATTR_RW(ctxid_pid); static ssize_t ctxid_masks_show(struct device *dev, struct device_attribute *attr, @@ -1949,7 +1955,7 @@ static ssize_t ctxid_masks_store(struct device *dev, */ for (j = 0; j < 8; j++) { if (maskbyte & 1) - drvdata->ctxid_val[i] &= ~(0xFF << (j * 8)); + drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8)); maskbyte >>= 1; } /* Select the next ctxid comparator mask value */ @@ -2193,7 +2199,7 @@ static struct attribute *coresight_etmv4_attrs[] = { &dev_attr_res_idx.attr, &dev_attr_res_ctrl.attr, &dev_attr_ctxid_idx.attr, - &dev_attr_ctxid_val.attr, + &dev_attr_ctxid_pid.attr, &dev_attr_ctxid_masks.attr, &dev_attr_vmid_idx.attr, &dev_attr_vmid_val.attr, @@ -2513,8 +2519,11 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata) drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; } - for (i = 0; i < drvdata->numcidc; i++) - drvdata->ctxid_val[i] = 0x0; + for (i = 0; i < drvdata->numcidc; i++) { + drvdata->ctxid_pid[i] = 0x0; + drvdata->ctxid_vpid[i] = 0x0; + } + drvdata->ctxid_mask0 = 0x0; drvdata->ctxid_mask1 = 0x0; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index e08e983dd2d9..c34100205ca9 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -265,7 +265,9 @@ * @addr_type: Current status of the comparator register. * @ctxid_idx: Context ID index selector. * @ctxid_size: Size of the context ID field to consider. 
- * @ctxid_val: Value of the context ID comparator. + * @ctxid_pid: Value of the context ID comparator. + * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise + * the same value of ctxid_pid. * @ctxid_mask0:Context ID comparator mask for comparator 0-3. * @ctxid_mask1:Context ID comparator mask for comparator 4-7. * @vmid_idx: VM ID index selector. @@ -352,7 +354,8 @@ struct etmv4_drvdata { u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP]; u8 ctxid_idx; u8 ctxid_size; - u64 ctxid_val[ETMv4_MAX_CTXID_CMP]; + u64 ctxid_pid[ETMv4_MAX_CTXID_CMP]; + u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP]; u32 ctxid_mask0; u32 ctxid_mask1; u8 vmid_idx; diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 7974b7c3da6b..963ac197c253 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -12,7 +12,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> @@ -184,17 +183,7 @@ static struct platform_driver replicator_driver = { }, }; -static int __init replicator_init(void) -{ - return platform_driver_register(&replicator_driver); -} -module_init(replicator_init); - -static void __exit replicator_exit(void) -{ - platform_driver_unregister(&replicator_driver); -} -module_exit(replicator_exit); +builtin_platform_driver(replicator_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CoreSight Replicator driver"); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 720ceeb7fa9b..80a439543259 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1919,9 +1919,7 @@ int __init dm_interface_init(void) void dm_interface_exit(void) { - if (misc_deregister(&_dm_misc) < 0) - DMERR("misc_deregister failed for control device"); - + misc_deregister(&_dm_misc); dm_hash_exit(); } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 42c38525904b..ccccc2943f2f 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -271,6 +271,16 @@ config HP_ILO To compile this driver as a module, choose M here: the module will be called hpilo. +config QCOM_COINCELL + tristate "Qualcomm coincell charger support" + depends on MFD_SPMI_PMIC || COMPILE_TEST + help + This driver supports the coincell block found inside of + Qualcomm PMICs. The coincell charger provides a means to + charge a coincell battery or backup capacitor which is used + to maintain PMIC register and RTC state in the absence of + external power. 
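Stepping back to the CoreSight hunks above: the ctxid_pid/ctxid_vpid pair exists because a user inside a PID namespace writes the PID it can see, while the ETM hardware compares against the context ID the kernel programs, i.e. the init-namespace PID. The stores therefore run the written value through coresight_vpid_to_pid() and keep both numbers, so reads report the namespace-local value back. The sketch below shows one plausible shape for such a translation helper; it is not claimed to be the exact in-tree implementation.

/*
 * Sketch of a vPID -> global PID translation of the kind used by
 * ctxid_pid_store() above.  Details of the real helper may differ.
 */
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>

static inline int example_vpid_to_pid(int vpid)
{
	struct task_struct *task;
	int pid = 0;

	rcu_read_lock();
	/* Look the task up by the PID as seen in the caller's namespace... */
	task = find_task_by_vpid(vpid);
	if (task)
		/* ...and return the PID the kernel (and the tracer) uses. */
		pid = task_pid_nr(task);
	rcu_read_unlock();

	return pid;
}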
+ config SGI_GRU tristate "SGI GRU driver" depends on X86_UV && SMP diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index d056fb7186fe..537d7f3b78da 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_LKDTM) += lkdtm.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c index 705b881e186d..d11187d36ddd 100644 --- a/drivers/misc/ad525x_dpot-i2c.c +++ b/drivers/misc/ad525x_dpot-i2c.c @@ -106,7 +106,6 @@ MODULE_DEVICE_TABLE(i2c, ad_dpot_id); static struct i2c_driver ad_dpot_i2c_driver = { .driver = { .name = "ad_dpot", - .owner = THIS_MODULE, }, .probe = ad_dpot_i2c_probe, .remove = ad_dpot_i2c_remove, diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c index 3739ffa9cdf1..a3e789b85cc8 100644 --- a/drivers/misc/apds990x.c +++ b/drivers/misc/apds990x.c @@ -1275,7 +1275,6 @@ static const struct dev_pm_ops apds990x_pm_ops = { static struct i2c_driver apds990x_driver = { .driver = { .name = "apds990x", - .owner = THIS_MODULE, .pm = &apds990x_pm_ops, }, .probe = apds990x_probe, diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c index b756381b8250..753d7ecdadaa 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c @@ -1396,7 +1396,6 @@ static const struct dev_pm_ops bh1770_pm_ops = { static struct i2c_driver bh1770_driver = { .driver = { .name = "bh1770glc", - .owner = THIS_MODULE, .pm = &bh1770_pm_ops, }, .probe = bh1770_probe, diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c index a7c16295b816..f35c218aaa1a 100644 --- a/drivers/misc/bmp085-i2c.c +++ b/drivers/misc/bmp085-i2c.c @@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(i2c, bmp085_id); static struct i2c_driver bmp085_i2c_driver = { .driver = { - .owner = THIS_MODULE, .name = BMP085_NAME, }, .id_table = bmp085_id, diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 31f38bc71a3d..87cd747bb511 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -443,12 +443,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj, struct afu_config_record *cr = to_cr(kobj); struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj)); - u64 i, j, val, size = afu->crs_len; - - if (off > size) - return 0; - if (off + count > size) - count = size - off; + u64 i, j, val; for (i = 0; i < count;) { val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7); diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c index b909fb30232a..c7112276a039 100644 --- a/drivers/misc/ds1682.c +++ b/drivers/misc/ds1682.c @@ -148,12 +148,6 @@ static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj, dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n", buf, off, count); - if (off >= DS1682_EEPROM_SIZE) - return 0; - - if (off + count > DS1682_EEPROM_SIZE) - count = DS1682_EEPROM_SIZE - off; - rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off, count, buf); if (rc < 0) @@ -171,12 +165,6 @@ static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj, dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n", buf, off, count); - if (off >= DS1682_EEPROM_SIZE) - return -ENOSPC; - - if (off + count > DS1682_EEPROM_SIZE) - count = 
DS1682_EEPROM_SIZE - off; - /* Write out to the device */ if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off, count, buf) < 0) diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index 9536852fd4c6..04f2e1fa9dd1 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -96,17 +96,4 @@ config EEPROM_DIGSY_MTC_CFG If unsure, say N. -config EEPROM_SUNXI_SID - tristate "Allwinner sunxi security ID support" - depends on ARCH_SUNXI && SYSFS - help - This is a driver for the 'security ID' available on various Allwinner - devices. - - Due to the potential risks involved with changing e-fuses, - this driver is read-only. - - This driver can also be built as a module. If so, the module - will be called sunxi_sid. - endmenu diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile index 9507aec95e94..fc1e81d29267 100644 --- a/drivers/misc/eeprom/Makefile +++ b/drivers/misc/eeprom/Makefile @@ -4,5 +4,4 @@ obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o obj-$(CONFIG_EEPROM_MAX6875) += max6875.o obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o -obj-$(CONFIG_EEPROM_SUNXI_SID) += sunxi_sid.o obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 6ded3dc36644..2b254f3a1154 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -686,7 +686,6 @@ static int at24_remove(struct i2c_client *client) static struct i2c_driver at24_driver = { .driver = { .name = "at24", - .owner = THIS_MODULE, }, .probe = at24_probe, .remove = at24_remove, diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index b432873def96..7342fd637031 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c @@ -88,11 +88,6 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, struct eeprom_data *data = i2c_get_clientdata(client); u8 slice; - if (off > EEPROM_SIZE) - return 0; - if (off + count > EEPROM_SIZE) - count = EEPROM_SIZE - off; - /* Only refresh slices which contain requested bytes */ for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++) eeprom_update_client(client, slice); diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 9ebeacdb8ec4..a6bd9e3fe9d3 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -48,13 +48,6 @@ eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj, dev = container_of(kobj, struct device, kobj); edev = dev_get_drvdata(dev); - if (unlikely(off >= edev->bin.size)) - return 0; - if ((off + count) > edev->bin.size) - count = edev->bin.size - off; - if (unlikely(!count)) - return count; - cmd_addr = OP_READ << edev->addrlen; if (edev->addrlen == 7) { @@ -200,13 +193,6 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj, dev = container_of(kobj, struct device, kobj); edev = dev_get_drvdata(dev); - if (unlikely(off >= edev->bin.size)) - return -EFBIG; - if ((off + count) > edev->bin.size) - count = edev->bin.size - off; - if (unlikely(!count)) - return count; - /* only write even number of bytes on 16-bit devices */ if (edev->addrlen == 6) { step = 2; diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 580ff9df5529..9aa4332a6b04 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -114,12 +114,6 @@ static ssize_t max6875_read(struct file *filp, struct kobject *kobj, struct max6875_data *data 
= i2c_get_clientdata(client); int slice, max_slice; - if (off > USER_EEPROM_SIZE) - return 0; - - if (off + count > USER_EEPROM_SIZE) - count = USER_EEPROM_SIZE - off; - /* refresh slices which contain requested bytes */ max_slice = (off + count - 1) >> SLICE_BITS; for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++) diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c deleted file mode 100644 index 8385177ff32b..000000000000 --- a/drivers/misc/eeprom/sunxi_sid.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> - * http://www.linux-sunxi.org - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver exposes the Allwinner security ID, efuses exported in byte- - * sized chunks. - */ - -#include <linux/compiler.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/export.h> -#include <linux/fs.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/kobject.h> -#include <linux/module.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/random.h> -#include <linux/slab.h> -#include <linux/stat.h> -#include <linux/sysfs.h> -#include <linux/types.h> - -#define DRV_NAME "sunxi-sid" - -struct sunxi_sid_data { - void __iomem *reg_base; - unsigned int keysize; -}; - -/* We read the entire key, due to a 32 bit read alignment requirement. Since we - * want to return the requested byte, this results in somewhat slower code and - * uses 4 times more reads as needed but keeps code simpler. Since the SID is - * only very rarely probed, this is not really an issue. 
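A note on the sysfs read/write hunks above (cxl, ds1682, legacy eeprom, eeprom_93xx46, max6875): the deleted offset and length checks duplicate work the sysfs core already performs, since it clamps a binary-attribute access against the attribute size before calling the driver's handler. A simplified sketch of that clamping, not the literal fs/sysfs code, is:

/* Roughly what the sysfs core does before invoking a bin_attribute handler. */
static size_t example_clamp_bin_access(loff_t pos, size_t count, size_t attr_size)
{
	if (!attr_size)
		return count;			/* unlimited attribute */
	if (pos >= attr_size)
		return 0;			/* past the end: nothing to do */
	if (pos + count > attr_size)
		count = attr_size - pos;	/* truncate to the attribute */
	return count;
}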
- */ -static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data, - const unsigned int offset) -{ - u32 sid_key; - - if (offset >= sid_data->keysize) - return 0; - - sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4)); - sid_key >>= (offset % 4) * 8; - - return sid_key; /* Only return the last byte */ -} - -static ssize_t sid_read(struct file *fd, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t pos, size_t size) -{ - struct platform_device *pdev; - struct sunxi_sid_data *sid_data; - int i; - - pdev = to_platform_device(kobj_to_dev(kobj)); - sid_data = platform_get_drvdata(pdev); - - if (pos < 0 || pos >= sid_data->keysize) - return 0; - if (size > sid_data->keysize - pos) - size = sid_data->keysize - pos; - - for (i = 0; i < size; i++) - buf[i] = sunxi_sid_read_byte(sid_data, pos + i); - - return i; -} - -static struct bin_attribute sid_bin_attr = { - .attr = { .name = "eeprom", .mode = S_IRUGO, }, - .read = sid_read, -}; - -static int sunxi_sid_remove(struct platform_device *pdev) -{ - device_remove_bin_file(&pdev->dev, &sid_bin_attr); - dev_dbg(&pdev->dev, "driver unloaded\n"); - - return 0; -} - -static const struct of_device_id sunxi_sid_of_match[] = { - { .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16}, - { .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512}, - {/* sentinel */}, -}; -MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); - -static int sunxi_sid_probe(struct platform_device *pdev) -{ - struct sunxi_sid_data *sid_data; - struct resource *res; - const struct of_device_id *of_dev_id; - u8 *entropy; - unsigned int i; - - sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data), - GFP_KERNEL); - if (!sid_data) - return -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(sid_data->reg_base)) - return PTR_ERR(sid_data->reg_base); - - of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev); - if (!of_dev_id) - return -ENODEV; - sid_data->keysize = (int)of_dev_id->data; - - platform_set_drvdata(pdev, sid_data); - - sid_bin_attr.size = sid_data->keysize; - if (device_create_bin_file(&pdev->dev, &sid_bin_attr)) - return -ENODEV; - - entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL); - for (i = 0; i < sid_data->keysize; i++) - entropy[i] = sunxi_sid_read_byte(sid_data, i); - add_device_randomness(entropy, sid_data->keysize); - kfree(entropy); - - dev_dbg(&pdev->dev, "loaded\n"); - - return 0; -} - -static struct platform_driver sunxi_sid_driver = { - .probe = sunxi_sid_probe, - .remove = sunxi_sid_remove, - .driver = { - .name = DRV_NAME, - .of_match_table = sunxi_sid_of_match, - }, -}; -module_platform_driver(sunxi_sid_driver); - -MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); -MODULE_DESCRIPTION("Allwinner sunxi security id driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index 12c30b486b27..976df0013633 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -465,7 +465,6 @@ MODULE_DEVICE_TABLE(i2c, isl29003_id); static struct i2c_driver isl29003_driver = { .driver = { .name = ISL29003_DRV_NAME, - .owner = THIS_MODULE, .pm = ISL29003_PM_OPS, }, .probe = isl29003_probe, diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index e3e7f1dc27ba..0c3bb7e3ee80 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -274,7 +274,6 @@ static const struct dev_pm_ops 
lis3_pm_ops = { static struct i2c_driver lis3lv02d_i2c_driver = { .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, .pm = &lis3_pm_ops, .of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids), }, diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 518914a82b83..01447ca21c26 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -11,7 +11,7 @@ mei-objs += main.o mei-objs += amthif.o mei-objs += wd.o mei-objs += bus.o -mei-objs += nfc.o +mei-objs += bus-fixup.o mei-$(CONFIG_DEBUG_FS) += debugfs.o obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c new file mode 100644 index 000000000000..3e536ca85f7d --- /dev/null +++ b/drivers/misc/mei/bus-fixup.c @@ -0,0 +1,306 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2003-2013, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uuid.h> + +#include <linux/mei_cl_bus.h> + +#include "mei_dev.h" +#include "client.h" + +#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \ + 0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06) + +static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; + +#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \ + 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) + +#define MEI_UUID_ANY NULL_UUID_LE + +/** + * number_of_connections - determine whether an client be on the bus + * according number of connections + * We support only clients: + * 1. with single connection + * 2. 
and fixed clients (max_number_of_connections == 0) + * + * @cldev: me clients device + */ +static void number_of_connections(struct mei_cl_device *cldev) +{ + dev_dbg(&cldev->dev, "running hook %s on %pUl\n", + __func__, mei_me_cl_uuid(cldev->me_cl)); + + if (cldev->me_cl->props.max_number_of_connections > 1) + cldev->do_match = 0; +} + +/** + * blacklist - blacklist a client from the bus + * + * @cldev: me clients device + */ +static void blacklist(struct mei_cl_device *cldev) +{ + dev_dbg(&cldev->dev, "running hook %s on %pUl\n", + __func__, mei_me_cl_uuid(cldev->me_cl)); + cldev->do_match = 0; +} + +struct mei_nfc_cmd { + u8 command; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; + u8 sub_command; + u8 data[]; +} __packed; + +struct mei_nfc_reply { + u8 command; + u8 status; + u16 req_id; + u32 reserved; + u16 data_size; + u8 sub_command; + u8 reply_status; + u8 data[]; +} __packed; + +struct mei_nfc_if_version { + u8 radio_version_sw[3]; + u8 reserved[3]; + u8 radio_version_hw[3]; + u8 i2c_addr; + u8 fw_ivn; + u8 vendor_id; + u8 radio_type; +} __packed; + + +#define MEI_NFC_CMD_MAINTENANCE 0x00 +#define MEI_NFC_SUBCMD_IF_VERSION 0x01 + +/* Vendors */ +#define MEI_NFC_VENDOR_INSIDE 0x00 +#define MEI_NFC_VENDOR_NXP 0x01 + +/* Radio types */ +#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00 +#define MEI_NFC_VENDOR_NXP_PN544 0x01 + +/** + * mei_nfc_if_version - get NFC interface version + * + * @cl: host client (nfc info) + * @ver: NFC interface version to be filled in + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_nfc_if_version(struct mei_cl *cl, + struct mei_nfc_if_version *ver) +{ + struct mei_device *bus; + struct mei_nfc_cmd cmd = { + .command = MEI_NFC_CMD_MAINTENANCE, + .data_size = 1, + .sub_command = MEI_NFC_SUBCMD_IF_VERSION, + }; + struct mei_nfc_reply *reply = NULL; + size_t if_version_length; + int bytes_recv, ret; + + bus = cl->dev; + + WARN_ON(mutex_is_locked(&bus->device_lock)); + + ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1); + if (ret < 0) { + dev_err(bus->dev, "Could not send IF version cmd\n"); + return ret; + } + + /* to be sure on the stack we alloc memory */ + if_version_length = sizeof(struct mei_nfc_reply) + + sizeof(struct mei_nfc_if_version); + + reply = kzalloc(if_version_length, GFP_KERNEL); + if (!reply) + return -ENOMEM; + + ret = 0; + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); + if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { + dev_err(bus->dev, "Could not read IF version\n"); + ret = -EIO; + goto err; + } + + memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version)); + + dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", + ver->fw_ivn, ver->vendor_id, ver->radio_type); + +err: + kfree(reply); + return ret; +} + +/** + * mei_nfc_radio_name - derive nfc radio name from the interface version + * + * @ver: NFC radio version + * + * Return: radio name string + */ +static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver) +{ + + if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) { + if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD) + return "microread"; + } + + if (ver->vendor_id == MEI_NFC_VENDOR_NXP) { + if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544) + return "pn544"; + } + + return NULL; +} + +/** + * mei_nfc - The nfc fixup function. 
The function retrieves nfc radio + * name and set is as device attribute so we can load + * the proper device driver for it + * + * @cldev: me client device (nfc) + */ +static void mei_nfc(struct mei_cl_device *cldev) +{ + struct mei_device *bus; + struct mei_cl *cl; + struct mei_me_client *me_cl = NULL; + struct mei_nfc_if_version ver; + const char *radio_name = NULL; + int ret; + + bus = cldev->bus; + + dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n", + __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match); + + mutex_lock(&bus->device_lock); + /* we need to connect to INFO GUID */ + cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY); + if (IS_ERR(cl)) { + ret = PTR_ERR(cl); + cl = NULL; + dev_err(bus->dev, "nfc hook alloc failed %d\n", ret); + goto out; + } + + me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid); + if (!me_cl) { + ret = -ENOTTY; + dev_err(bus->dev, "Cannot find nfc info %d\n", ret); + goto out; + } + + ret = mei_cl_connect(cl, me_cl, NULL); + if (ret < 0) { + dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n", + ret); + goto out; + } + + mutex_unlock(&bus->device_lock); + + ret = mei_nfc_if_version(cl, &ver); + if (ret) + goto disconnect; + + radio_name = mei_nfc_radio_name(&ver); + + if (!radio_name) { + ret = -ENOENT; + dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n", + ret); + goto disconnect; + } + + dev_dbg(bus->dev, "nfc radio %s\n", radio_name); + strlcpy(cldev->name, radio_name, sizeof(cldev->name)); + +disconnect: + mutex_lock(&bus->device_lock); + if (mei_cl_disconnect(cl) < 0) + dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n"); + + mei_cl_flush_queues(cl, NULL); + +out: + mei_cl_unlink(cl); + mutex_unlock(&bus->device_lock); + mei_me_cl_put(me_cl); + kfree(cl); + + if (ret) + cldev->do_match = 0; + + dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match); +} + +#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook } + +static struct mei_fixup { + + const uuid_le uuid; + void (*hook)(struct mei_cl_device *cldev); +} mei_fixups[] = { + MEI_FIXUP(MEI_UUID_ANY, number_of_connections), + MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist), + MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), +}; + +/** + * mei_cl_dev_fixup - run fixup handlers + * + * @cldev: me client device + */ +void mei_cl_dev_fixup(struct mei_cl_device *cldev) +{ + struct mei_fixup *f; + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + int i; + + for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) { + + f = &mei_fixups[i]; + if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 || + uuid_le_cmp(f->uuid, *uuid) == 0) + f->hook(cldev); + } +} + diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 458aa5a09c52..eef1c6b46ad8 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -30,276 +30,29 @@ #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) -static int mei_cl_device_match(struct device *dev, struct device_driver *drv) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - struct mei_cl_driver *driver = to_mei_cl_driver(drv); - const struct mei_cl_device_id *id; - const uuid_le *uuid; - const char *name; - - if (!device) - return 0; - - uuid = mei_me_cl_uuid(device->me_cl); - name = device->name; - - if (!driver || !driver->id_table) - return 0; - - id = driver->id_table; - - while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) { - - if (!uuid_le_cmp(*uuid, id->uuid)) { - if (id->name[0]) { - if (!strncmp(name, id->name, sizeof(id->name))) - return 1; - } 
else { - return 1; - } - } - - id++; - } - - return 0; -} - -static int mei_cl_device_probe(struct device *dev) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - struct mei_cl_driver *driver; - struct mei_cl_device_id id; - - if (!device) - return 0; - - driver = to_mei_cl_driver(dev->driver); - if (!driver || !driver->probe) - return -ENODEV; - - dev_dbg(dev, "Device probe\n"); - - strlcpy(id.name, device->name, sizeof(id.name)); - - return driver->probe(device, &id); -} - -static int mei_cl_device_remove(struct device *dev) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - struct mei_cl_driver *driver; - - if (!device || !dev->driver) - return 0; - - if (device->event_cb) { - device->event_cb = NULL; - cancel_work_sync(&device->event_work); - } - - driver = to_mei_cl_driver(dev->driver); - if (!driver->remove) { - dev->driver = NULL; - - return 0; - } - - return driver->remove(device); -} - -static ssize_t name_show(struct device *dev, struct device_attribute *a, - char *buf) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - size_t len; - - len = snprintf(buf, PAGE_SIZE, "%s", device->name); - - return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; -} -static DEVICE_ATTR_RO(name); - -static ssize_t uuid_show(struct device *dev, struct device_attribute *a, - char *buf) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - const uuid_le *uuid = mei_me_cl_uuid(device->me_cl); - size_t len; - - len = snprintf(buf, PAGE_SIZE, "%pUl", uuid); - - return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; -} -static DEVICE_ATTR_RO(uuid); - -static ssize_t modalias_show(struct device *dev, struct device_attribute *a, - char *buf) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - const uuid_le *uuid = mei_me_cl_uuid(device->me_cl); - size_t len; - - len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":", - device->name, MEI_CL_UUID_ARGS(uuid->b)); - - return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; -} -static DEVICE_ATTR_RO(modalias); - -static struct attribute *mei_cl_dev_attrs[] = { - &dev_attr_name.attr, - &dev_attr_uuid.attr, - &dev_attr_modalias.attr, - NULL, -}; -ATTRIBUTE_GROUPS(mei_cl_dev); - -static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - const uuid_le *uuid = mei_me_cl_uuid(device->me_cl); - - if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid)) - return -ENOMEM; - - if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name)) - return -ENOMEM; - - if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":", - device->name, MEI_CL_UUID_ARGS(uuid->b))) - return -ENOMEM; - - return 0; -} - -static struct bus_type mei_cl_bus_type = { - .name = "mei", - .dev_groups = mei_cl_dev_groups, - .match = mei_cl_device_match, - .probe = mei_cl_device_probe, - .remove = mei_cl_device_remove, - .uevent = mei_cl_uevent, -}; - -static void mei_cl_dev_release(struct device *dev) -{ - struct mei_cl_device *device = to_mei_cl_device(dev); - - if (!device) - return; - - mei_me_cl_put(device->me_cl); - kfree(device); -} - -static struct device_type mei_cl_device_type = { - .release = mei_cl_dev_release, -}; - -struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, - uuid_le uuid) -{ - struct mei_cl *cl; - - list_for_each_entry(cl, &dev->device_list, device_link) { - if (cl->device && cl->device->me_cl && - !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl))) - return cl; - } - - return NULL; -} - -struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, - struct mei_me_client *me_cl, - struct mei_cl *cl, - char *name) -{ - struct mei_cl_device *device; - int status; - - device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL); - if (!device) - return NULL; - - device->me_cl = mei_me_cl_get(me_cl); - if (!device->me_cl) { - kfree(device); - return NULL; - } - - device->cl = cl; - device->dev.parent = dev->dev; - device->dev.bus = &mei_cl_bus_type; - device->dev.type = &mei_cl_device_type; - - strlcpy(device->name, name, sizeof(device->name)); - - dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl)); - - status = device_register(&device->dev); - if (status) { - dev_err(dev->dev, "Failed to register MEI device\n"); - mei_me_cl_put(device->me_cl); - kfree(device); - return NULL; - } - - cl->device = device; - - dev_dbg(&device->dev, "client %s registered\n", name); - - return device; -} -EXPORT_SYMBOL_GPL(mei_cl_add_device); - -void mei_cl_remove_device(struct mei_cl_device *device) -{ - device_unregister(&device->dev); -} -EXPORT_SYMBOL_GPL(mei_cl_remove_device); - -int __mei_cl_driver_register(struct mei_cl_driver *driver, struct module *owner) -{ - int err; - - driver->driver.name = driver->name; - driver->driver.owner = owner; - driver->driver.bus = &mei_cl_bus_type; - - err = driver_register(&driver->driver); - if (err) - return err; - - pr_debug("mei: driver [%s] registered\n", driver->driver.name); - - return 0; -} -EXPORT_SYMBOL_GPL(__mei_cl_driver_register); - -void mei_cl_driver_unregister(struct mei_cl_driver *driver) -{ - driver_unregister(&driver->driver); - - pr_debug("mei: driver [%s] unregistered\n", driver->driver.name); -} -EXPORT_SYMBOL_GPL(mei_cl_driver_unregister); - +/** + * __mei_cl_send - internal client send (write) + * + * @cl: host client + * @buf: buffer to send + * @length: buffer length + * @blocking: wait for write completion + * + * Return: written size bytes or < 0 on error + */ ssize_t __mei_cl_send(struct mei_cl *cl, 
u8 *buf, size_t length, bool blocking) { - struct mei_device *dev; + struct mei_device *bus; struct mei_cl_cb *cb = NULL; ssize_t rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; - dev = cl->dev; + bus = cl->dev; - mutex_lock(&dev->device_lock); + mutex_lock(&bus->device_lock); if (!mei_cl_is_connected(cl)) { rets = -ENODEV; goto out; @@ -327,16 +80,25 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, rets = mei_cl_write(cl, cb, blocking); out: - mutex_unlock(&dev->device_lock); + mutex_unlock(&bus->device_lock); if (rets < 0) mei_io_cb_free(cb); return rets; } +/** + * __mei_cl_recv - internal client receive (read) + * + * @cl: host client + * @buf: buffer to send + * @length: buffer length + * + * Return: read size in bytes of < 0 on error + */ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) { - struct mei_device *dev; + struct mei_device *bus; struct mei_cl_cb *cb; size_t r_length; ssize_t rets; @@ -344,9 +106,9 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) if (WARN_ON(!cl || !cl->dev)) return -ENODEV; - dev = cl->dev; + bus = cl->dev; - mutex_lock(&dev->device_lock); + mutex_lock(&bus->device_lock); cb = mei_cl_read_cb(cl, NULL); if (cb) @@ -356,9 +118,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) if (rets && rets != -EBUSY) goto out; + /* wait on event only if there is no other waiter */ if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) { - mutex_unlock(&dev->device_lock); + mutex_unlock(&bus->device_lock); if (wait_event_interruptible(cl->rx_wait, (!list_empty(&cl->rd_completed)) || @@ -369,7 +132,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) return -ERESTARTSYS; } - mutex_lock(&dev->device_lock); + mutex_lock(&bus->device_lock); if (!mei_cl_is_connected(cl)) { rets = -EBUSY; @@ -396,14 +159,23 @@ copy: free: mei_io_cb_free(cb); out: - mutex_unlock(&dev->device_lock); + mutex_unlock(&bus->device_lock); return rets; } -ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length) +/** + * mei_cl_send - me device send (write) + * + * @cldev: me client device + * @buf: buffer to send + * @length: buffer length + * + * Return: written size in bytes or < 0 on error + */ +ssize_t mei_cl_send(struct mei_cl_device *cldev, u8 *buf, size_t length) { - struct mei_cl *cl = device->cl; + struct mei_cl *cl = cldev->cl; if (cl == NULL) return -ENODEV; @@ -412,9 +184,18 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length) } EXPORT_SYMBOL_GPL(mei_cl_send); -ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length) +/** + * mei_cl_recv - client receive (read) + * + * @cldev: me client device + * @buf: buffer to send + * @length: buffer length + * + * Return: read size in bytes of < 0 on error + */ +ssize_t mei_cl_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) { - struct mei_cl *cl = device->cl; + struct mei_cl *cl = cldev->cl; if (cl == NULL) return -ENODEV; @@ -423,134 +204,697 @@ ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length) } EXPORT_SYMBOL_GPL(mei_cl_recv); +/** + * mei_bus_event_work - dispatch rx event for a bus device + * and schedule new work + * + * @work: work + */ static void mei_bus_event_work(struct work_struct *work) { - struct mei_cl_device *device; + struct mei_cl_device *cldev; - device = container_of(work, struct mei_cl_device, event_work); + cldev = container_of(work, struct mei_cl_device, event_work); - if (device->event_cb) - device->event_cb(device, device->events, 
device->event_context); + if (cldev->event_cb) + cldev->event_cb(cldev, cldev->events, cldev->event_context); - device->events = 0; + cldev->events = 0; /* Prepare for the next read */ - mei_cl_read_start(device->cl, 0, NULL); + if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) + mei_cl_read_start(cldev->cl, 0, NULL); } -int mei_cl_register_event_cb(struct mei_cl_device *device, +/** + * mei_cl_bus_notify_event - schedule notify cb on bus client + * + * @cl: host client + */ +void mei_cl_bus_notify_event(struct mei_cl *cl) +{ + struct mei_cl_device *cldev = cl->cldev; + + if (!cldev || !cldev->event_cb) + return; + + if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) + return; + + if (!cl->notify_ev) + return; + + set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); + + schedule_work(&cldev->event_work); + + cl->notify_ev = false; +} + +/** + * mei_cl_bus_rx_event - schedule rx evenet + * + * @cl: host client + */ +void mei_cl_bus_rx_event(struct mei_cl *cl) +{ + struct mei_cl_device *cldev = cl->cldev; + + if (!cldev || !cldev->event_cb) + return; + + if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) + return; + + set_bit(MEI_CL_EVENT_RX, &cldev->events); + + schedule_work(&cldev->event_work); +} + +/** + * mei_cl_register_event_cb - register event callback + * + * @cldev: me client devices + * @event_cb: callback function + * @events_mask: requested events bitmask + * @context: driver context data + * + * Return: 0 on success + * -EALREADY if an callback is already registered + * <0 on other errors + */ +int mei_cl_register_event_cb(struct mei_cl_device *cldev, + unsigned long events_mask, mei_cl_event_cb_t event_cb, void *context) { - if (device->event_cb) + int ret; + + if (cldev->event_cb) return -EALREADY; - device->events = 0; - device->event_cb = event_cb; - device->event_context = context; - INIT_WORK(&device->event_work, mei_bus_event_work); + cldev->events = 0; + cldev->events_mask = events_mask; + cldev->event_cb = event_cb; + cldev->event_context = context; + INIT_WORK(&cldev->event_work, mei_bus_event_work); - mei_cl_read_start(device->cl, 0, NULL); + if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { + ret = mei_cl_read_start(cldev->cl, 0, NULL); + if (ret && ret != -EBUSY) + return ret; + } + + if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) { + mutex_lock(&cldev->cl->dev->device_lock); + ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 
1 : 0); + mutex_unlock(&cldev->cl->dev->device_lock); + if (ret) + return ret; + } return 0; } EXPORT_SYMBOL_GPL(mei_cl_register_event_cb); -void *mei_cl_get_drvdata(const struct mei_cl_device *device) +/** + * mei_cl_get_drvdata - driver data getter + * + * @cldev: mei client device + * + * Return: driver private data + */ +void *mei_cl_get_drvdata(const struct mei_cl_device *cldev) { - return dev_get_drvdata(&device->dev); + return dev_get_drvdata(&cldev->dev); } EXPORT_SYMBOL_GPL(mei_cl_get_drvdata); -void mei_cl_set_drvdata(struct mei_cl_device *device, void *data) +/** + * mei_cl_set_drvdata - driver data setter + * + * @cldev: mei client device + * @data: data to store + */ +void mei_cl_set_drvdata(struct mei_cl_device *cldev, void *data) { - dev_set_drvdata(&device->dev, data); + dev_set_drvdata(&cldev->dev, data); } EXPORT_SYMBOL_GPL(mei_cl_set_drvdata); -int mei_cl_enable_device(struct mei_cl_device *device) +/** + * mei_cl_enable_device - enable me client device + * create connection with me client + * + * @cldev: me client device + * + * Return: 0 on success and < 0 on error + */ +int mei_cl_enable_device(struct mei_cl_device *cldev) { - int err; - struct mei_device *dev; - struct mei_cl *cl = device->cl; - - if (cl == NULL) - return -ENODEV; - - dev = cl->dev; - - mutex_lock(&dev->device_lock); + struct mei_device *bus = cldev->bus; + struct mei_cl *cl; + int ret; + + cl = cldev->cl; + + if (!cl) { + mutex_lock(&bus->device_lock); + cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY); + mutex_unlock(&bus->device_lock); + if (IS_ERR(cl)) + return PTR_ERR(cl); + /* update pointers */ + cldev->cl = cl; + cl->cldev = cldev; + } + mutex_lock(&bus->device_lock); if (mei_cl_is_connected(cl)) { - mutex_unlock(&dev->device_lock); - dev_warn(dev->dev, "Already connected"); - return -EBUSY; + ret = 0; + goto out; } - err = mei_cl_connect(cl, device->me_cl, NULL); - if (err < 0) { - mutex_unlock(&dev->device_lock); - dev_err(dev->dev, "Could not connect to the ME client"); - - return err; + if (!mei_me_cl_is_active(cldev->me_cl)) { + dev_err(&cldev->dev, "me client is not active\n"); + ret = -ENOTTY; + goto out; } - mutex_unlock(&dev->device_lock); + ret = mei_cl_connect(cl, cldev->me_cl, NULL); + if (ret < 0) + dev_err(&cldev->dev, "cannot connect\n"); - if (device->event_cb) - mei_cl_read_start(device->cl, 0, NULL); +out: + mutex_unlock(&bus->device_lock); - return 0; + return ret; } EXPORT_SYMBOL_GPL(mei_cl_enable_device); -int mei_cl_disable_device(struct mei_cl_device *device) +/** + * mei_cl_disable_device - disable me client device + * disconnect form the me client + * + * @cldev: me client device + * + * Return: 0 on success and < 0 on error + */ +int mei_cl_disable_device(struct mei_cl_device *cldev) { + struct mei_device *bus; + struct mei_cl *cl; int err; - struct mei_device *dev; - struct mei_cl *cl = device->cl; - if (cl == NULL) + if (!cldev || !cldev->cl) return -ENODEV; - dev = cl->dev; + cl = cldev->cl; - device->event_cb = NULL; + bus = cldev->bus; - mutex_lock(&dev->device_lock); + cldev->event_cb = NULL; + + mutex_lock(&bus->device_lock); if (!mei_cl_is_connected(cl)) { - dev_err(dev->dev, "Already disconnected"); + dev_err(bus->dev, "Already disconnected"); err = 0; goto out; } err = mei_cl_disconnect(cl); - if (err < 0) { - dev_err(dev->dev, "Could not disconnect from the ME client"); - goto out; - } + if (err < 0) + dev_err(bus->dev, "Could not disconnect from the ME client"); +out: /* Flush queues and remove any pending read */ mei_cl_flush_queues(cl, NULL); + 
mei_cl_unlink(cl); -out: - mutex_unlock(&dev->device_lock); - return err; + kfree(cl); + cldev->cl = NULL; + mutex_unlock(&bus->device_lock); + return err; } EXPORT_SYMBOL_GPL(mei_cl_disable_device); -void mei_cl_bus_rx_event(struct mei_cl *cl) +/** + * mei_cl_device_find - find matching entry in the driver id table + * + * @cldev: me client device + * @cldrv: me client driver + * + * Return: id on success; NULL if no id is matching + */ +static const +struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev, + struct mei_cl_driver *cldrv) { - struct mei_cl_device *device = cl->device; + const struct mei_cl_device_id *id; + const uuid_le *uuid; + + uuid = mei_me_cl_uuid(cldev->me_cl); + + id = cldrv->id_table; + while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) { + if (!uuid_le_cmp(*uuid, id->uuid)) { + + if (!cldev->name[0]) + return id; + + if (!strncmp(cldev->name, id->name, sizeof(id->name))) + return id; + } + + id++; + } + + return NULL; +} + +/** + * mei_cl_device_match - device match function + * + * @dev: device + * @drv: driver + * + * Return: 1 if matching device was found 0 otherwise + */ +static int mei_cl_device_match(struct device *dev, struct device_driver *drv) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct mei_cl_driver *cldrv = to_mei_cl_driver(drv); + const struct mei_cl_device_id *found_id; + + if (!cldev) + return 0; + + if (!cldev->do_match) + return 0; + + if (!cldrv || !cldrv->id_table) + return 0; + + found_id = mei_cl_device_find(cldev, cldrv); + if (found_id) + return 1; + + return 0; +} + +/** + * mei_cl_device_probe - bus probe function + * + * @dev: device + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_cl_device_probe(struct device *dev) +{ + struct mei_cl_device *cldev; + struct mei_cl_driver *cldrv; + const struct mei_cl_device_id *id; + + cldev = to_mei_cl_device(dev); + cldrv = to_mei_cl_driver(dev->driver); + + if (!cldev) + return 0; + + if (!cldrv || !cldrv->probe) + return -ENODEV; + + id = mei_cl_device_find(cldev, cldrv); + if (!id) + return -ENODEV; + + __module_get(THIS_MODULE); + + return cldrv->probe(cldev, id); +} + +/** + * mei_cl_device_remove - remove device from the bus + * + * @dev: device + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_cl_device_remove(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct mei_cl_driver *cldrv; + int ret = 0; + + if (!cldev || !dev->driver) + return 0; + + if (cldev->event_cb) { + cldev->event_cb = NULL; + cancel_work_sync(&cldev->event_work); + } + + cldrv = to_mei_cl_driver(dev->driver); + if (cldrv->remove) + ret = cldrv->remove(cldev); + + module_put(THIS_MODULE); + dev->driver = NULL; + return ret; + +} + +static ssize_t name_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + size_t len; + + len = snprintf(buf, PAGE_SIZE, "%s", cldev->name); + + return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; +} +static DEVICE_ATTR_RO(name); + +static ssize_t uuid_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + size_t len; + + len = snprintf(buf, PAGE_SIZE, "%pUl", uuid); + + return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; +} +static DEVICE_ATTR_RO(uuid); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + size_t len; + + len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":", + cldev->name, MEI_CL_UUID_ARGS(uuid->b)); + + return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *mei_cl_dev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_uuid.attr, + &dev_attr_modalias.attr, + NULL, +}; +ATTRIBUTE_GROUPS(mei_cl_dev); + +/** + * mei_cl_device_uevent - me client bus uevent handler + * + * @dev: device + * @env: uevent kobject + * + * Return: 0 on success -ENOMEM on when add_uevent_var fails + */ +static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); + + if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid)) + return -ENOMEM; + + if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name)) + return -ENOMEM; + + if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":", + cldev->name, MEI_CL_UUID_ARGS(uuid->b))) + return -ENOMEM; - if (!device || !device->event_cb) + return 0; +} + +static struct bus_type mei_cl_bus_type = { + .name = "mei", + .dev_groups = mei_cl_dev_groups, + .match = mei_cl_device_match, + .probe = mei_cl_device_probe, + .remove = mei_cl_device_remove, + .uevent = mei_cl_device_uevent, +}; + +static struct mei_device *mei_dev_bus_get(struct mei_device *bus) +{ + if (bus) + get_device(bus->dev); + + return bus; +} + +static void mei_dev_bus_put(struct mei_device *bus) +{ + if (bus) + put_device(bus->dev); +} + +static void mei_cl_dev_release(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + + if (!cldev) + return; + + mei_me_cl_put(cldev->me_cl); + mei_dev_bus_put(cldev->bus); + kfree(cldev); +} + +static struct device_type mei_cl_device_type = { + .release = mei_cl_dev_release, +}; + +/** + * mei_cl_dev_alloc - initialize and allocate mei client device + * + * @bus: mei device + * @me_cl: me client + * + * Return: allocated device structur or NULL on allocation failure + */ +static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus, + struct mei_me_client *me_cl) +{ + struct mei_cl_device *cldev; + + cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL); + if (!cldev) + return NULL; + + device_initialize(&cldev->dev); + cldev->dev.parent = bus->dev; + cldev->dev.bus = &mei_cl_bus_type; + cldev->dev.type = &mei_cl_device_type; + cldev->bus = mei_dev_bus_get(bus); + cldev->me_cl = mei_me_cl_get(me_cl); + cldev->is_added = 0; + INIT_LIST_HEAD(&cldev->bus_list); + + return cldev; +} + +/** + * mei_cl_dev_setup - setup me client device + * run fix up routines and set the device name + * + * @bus: mei device + * @cldev: me client device + * + * Return: true if the device is eligible for enumeration + */ +static bool mei_cl_dev_setup(struct mei_device *bus, + struct mei_cl_device *cldev) +{ + cldev->do_match = 1; + mei_cl_dev_fixup(cldev); + + if (cldev->do_match) + dev_set_name(&cldev->dev, "mei:%s:%pUl", + cldev->name, mei_me_cl_uuid(cldev->me_cl)); + + return cldev->do_match == 1; +} + +/** + * mei_cl_bus_dev_add - add me client devices + * + * @cldev: me client device + * + * Return: 0 on success; < 0 on failre + */ +static int mei_cl_bus_dev_add(struct mei_cl_device *cldev) +{ + 
int ret; + + dev_dbg(cldev->bus->dev, "adding %pUL\n", mei_me_cl_uuid(cldev->me_cl)); + ret = device_add(&cldev->dev); + if (!ret) + cldev->is_added = 1; + + return ret; +} + +/** + * mei_cl_bus_dev_stop - stop the driver + * + * @cldev: me client device + */ +static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev) +{ + if (cldev->is_added) + device_release_driver(&cldev->dev); +} + +/** + * mei_cl_bus_dev_destroy - destroy me client devices object + * + * @cldev: me client device + */ +static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev) +{ + if (!cldev->is_added) + return; + + device_del(&cldev->dev); + + mutex_lock(&cldev->bus->cl_bus_lock); + list_del_init(&cldev->bus_list); + mutex_unlock(&cldev->bus->cl_bus_lock); + + cldev->is_added = 0; + put_device(&cldev->dev); +} + +/** + * mei_cl_bus_remove_device - remove a devices form the bus + * + * @cldev: me client device + */ +static void mei_cl_bus_remove_device(struct mei_cl_device *cldev) +{ + mei_cl_bus_dev_stop(cldev); + mei_cl_bus_dev_destroy(cldev); +} + +/** + * mei_cl_bus_remove_devices - remove all devices form the bus + * + * @bus: mei device + */ +void mei_cl_bus_remove_devices(struct mei_device *bus) +{ + struct mei_cl_device *cldev, *next; + + list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list) + mei_cl_bus_remove_device(cldev); +} + + +/** + * mei_cl_dev_init - allocate and initializes an mei client devices + * based on me client + * + * @bus: mei device + * @me_cl: me client + */ +static void mei_cl_dev_init(struct mei_device *bus, struct mei_me_client *me_cl) +{ + struct mei_cl_device *cldev; + + dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl)); + + if (me_cl->bus_added) return; - set_bit(MEI_CL_EVENT_RX, &device->events); + cldev = mei_cl_dev_alloc(bus, me_cl); + if (!cldev) + return; + + mutex_lock(&cldev->bus->cl_bus_lock); + me_cl->bus_added = true; + list_add_tail(&cldev->bus_list, &bus->device_list); + mutex_unlock(&cldev->bus->cl_bus_lock); + +} + +/** + * mei_cl_bus_rescan - scan me clients list and add create + * devices for eligible clients + * + * @bus: mei device + */ +void mei_cl_bus_rescan(struct mei_device *bus) +{ + struct mei_cl_device *cldev, *n; + struct mei_me_client *me_cl; + + down_read(&bus->me_clients_rwsem); + list_for_each_entry(me_cl, &bus->me_clients, list) + mei_cl_dev_init(bus, me_cl); + up_read(&bus->me_clients_rwsem); + + mutex_lock(&bus->cl_bus_lock); + list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) { + + if (!mei_me_cl_is_active(cldev->me_cl)) { + mei_cl_bus_remove_device(cldev); + continue; + } + + if (cldev->is_added) + continue; + + if (mei_cl_dev_setup(bus, cldev)) + mei_cl_bus_dev_add(cldev); + else { + list_del_init(&cldev->bus_list); + put_device(&cldev->dev); + } + } + mutex_unlock(&bus->cl_bus_lock); + + dev_dbg(bus->dev, "rescan end"); +} + +int __mei_cl_driver_register(struct mei_cl_driver *cldrv, struct module *owner) +{ + int err; + + cldrv->driver.name = cldrv->name; + cldrv->driver.owner = owner; + cldrv->driver.bus = &mei_cl_bus_type; + + err = driver_register(&cldrv->driver); + if (err) + return err; - schedule_work(&device->event_work); + pr_debug("mei: driver [%s] registered\n", cldrv->driver.name); + + return 0; } +EXPORT_SYMBOL_GPL(__mei_cl_driver_register); + +void mei_cl_driver_unregister(struct mei_cl_driver *cldrv) +{ + driver_unregister(&cldrv->driver); + + pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name); +} +EXPORT_SYMBOL_GPL(mei_cl_driver_unregister); + int __init 
mei_cl_bus_init(void) { diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 6decbe136ea7..a6c87c713193 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -555,10 +555,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) init_waitqueue_head(&cl->wait); init_waitqueue_head(&cl->rx_wait); init_waitqueue_head(&cl->tx_wait); + init_waitqueue_head(&cl->ev_wait); INIT_LIST_HEAD(&cl->rd_completed); INIT_LIST_HEAD(&cl->rd_pending); INIT_LIST_HEAD(&cl->link); - INIT_LIST_HEAD(&cl->device_link); cl->writing_state = MEI_IDLE; cl->state = MEI_FILE_INITIALIZING; cl->dev = dev; @@ -690,16 +690,12 @@ void mei_host_client_init(struct work_struct *work) mei_wd_host_init(dev, me_cl); mei_me_cl_put(me_cl); - me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid); - if (me_cl) - mei_nfc_host_init(dev, me_cl); - mei_me_cl_put(me_cl); - - dev->dev_state = MEI_DEV_ENABLED; dev->reset_count = 0; mutex_unlock(&dev->device_lock); + mei_cl_bus_rescan(dev); + pm_runtime_mark_last_busy(dev->dev); dev_dbg(dev->dev, "rpm: autosuspend\n"); pm_runtime_autosuspend(dev->dev); @@ -841,45 +837,22 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, return ret; } - - /** - * mei_cl_disconnect - disconnect host client from the me one + * __mei_cl_disconnect - disconnect host client from the me one + * internal function runtime pm has to be already acquired * * @cl: host client * - * Locking: called under "dev->device_lock" lock - * * Return: 0 on success, <0 on failure. */ -int mei_cl_disconnect(struct mei_cl *cl) +static int __mei_cl_disconnect(struct mei_cl *cl) { struct mei_device *dev; struct mei_cl_cb *cb; int rets; - if (WARN_ON(!cl || !cl->dev)) - return -ENODEV; - dev = cl->dev; - cl_dbg(dev, cl, "disconnecting"); - - if (!mei_cl_is_connected(cl)) - return 0; - - if (mei_cl_is_fixed_address(cl)) { - mei_cl_set_disconnected(cl); - return 0; - } - - rets = pm_runtime_get(dev->dev); - if (rets < 0 && rets != -EINPROGRESS) { - pm_runtime_put_noidle(dev->dev); - cl_err(dev, cl, "rpm: get failed %d\n", rets); - return rets; - } - cl->state = MEI_FILE_DISCONNECTING; cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL); @@ -915,11 +888,52 @@ out: if (!rets) cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_disconnect - disconnect host client from the me one + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on success, <0 on failure. 
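The hunks above replace the old ad-hoc mei_cl_add_device()/mei_cl_remove_device() enumeration with a real bus_type ("mei") whose match/probe/remove callbacks bind fixed-up client devices to drivers by UUID and, optionally, by name. A minimal client driver written against this bus could look roughly as follows; the UUID, names and probe body are illustrative only, mei_cl_driver_register() is assumed to be the usual THIS_MODULE wrapper around the __mei_cl_driver_register() exported above, and mei_cl_enable_device()/mei_cl_disable_device() are the existing bus connect/disconnect helpers.

#include <linux/module.h>
#include <linux/mei_cl_bus.h>

/* illustrative UUID only; a real driver matches its own FW client GUID */
#define SAMPLE_UUID UUID_LE(0x12345678, 0x1234, 0x1234, \
			    0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34)

static const struct mei_cl_device_id sample_id_table[] = {
	{ .uuid = SAMPLE_UUID },
	{ }	/* zero sentinel: mei_cl_device_find() stops at NULL_UUID_LE */
};
MODULE_DEVICE_TABLE(mei, sample_id_table);

static int sample_probe(struct mei_cl_device *cldev,
			const struct mei_cl_device_id *id)
{
	/* called once mei_cl_device_match() found a UUID (and name) match */
	return mei_cl_enable_device(cldev);
}

static int sample_remove(struct mei_cl_device *cldev)
{
	return mei_cl_disable_device(cldev);
}

static struct mei_cl_driver sample_driver = {
	.id_table = sample_id_table,
	.name     = "mei_sample",
	.probe    = sample_probe,
	.remove   = sample_remove,
};

static int __init sample_init(void)
{
	return mei_cl_driver_register(&sample_driver);
}

static void __exit sample_exit(void)
{
	mei_cl_driver_unregister(&sample_driver);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");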
+ */ +int mei_cl_disconnect(struct mei_cl *cl) +{ + struct mei_device *dev; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + cl_dbg(dev, cl, "disconnecting"); + + if (!mei_cl_is_connected(cl)) + return 0; + + if (mei_cl_is_fixed_address(cl)) { + mei_cl_set_disconnected(cl); + return 0; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + rets = __mei_cl_disconnect(cl); + cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); - mei_io_cb_free(cb); return rets; } @@ -1064,11 +1078,23 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, (cl->state == MEI_FILE_CONNECTED || + cl->state == MEI_FILE_DISCONNECT_REQUIRED || cl->state == MEI_FILE_DISCONNECT_REPLY), mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); if (!mei_cl_is_connected(cl)) { + if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) { + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); + /* ignore disconnect return valuue; + * in case of failure reset will be invoked + */ + __mei_cl_disconnect(cl); + rets = -EFAULT; + goto out; + } + /* timeout or something went really wrong */ if (!cl->status) cl->status = -EFAULT; @@ -1181,6 +1207,221 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) } /** + * mei_cl_notify_fop2req - convert fop to proper request + * + * @fop: client notification start response command + * + * Return: MEI_HBM_NOTIFICATION_START/STOP + */ +u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop) +{ + if (fop == MEI_FOP_NOTIFY_START) + return MEI_HBM_NOTIFICATION_START; + else + return MEI_HBM_NOTIFICATION_STOP; +} + +/** + * mei_cl_notify_req2fop - convert notification request top file operation type + * + * @req: hbm notification request type + * + * Return: MEI_FOP_NOTIFY_START/STOP + */ +enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req) +{ + if (req == MEI_HBM_NOTIFICATION_START) + return MEI_FOP_NOTIFY_START; + else + return MEI_FOP_NOTIFY_STOP; +} + +/** + * mei_cl_irq_notify - send notification request in irq_thread context + * + * @cl: client + * @cb: callback block. + * @cmpl_list: complete list. + * + * Return: 0 on such and error otherwise. + */ +int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list) +{ + struct mei_device *dev = cl->dev; + u32 msg_slots; + int slots; + int ret; + bool request; + + msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); + slots = mei_hbuf_empty_slots(dev); + + if (slots < msg_slots) + return -EMSGSIZE; + + request = mei_cl_notify_fop2req(cb->fop_type); + ret = mei_hbm_cl_notify_req(dev, cl, request); + if (ret) { + cl->status = ret; + list_move_tail(&cb->list, &cmpl_list->list); + return ret; + } + + list_move_tail(&cb->list, &dev->ctrl_rd_list.list); + return 0; +} + +/** + * mei_cl_notify_request - send notification stop/start request + * + * @cl: host client + * @file: associate request with file + * @request: 1 for start or 0 for stop + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on such and error otherwise. 
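With the split above, mei_cl_disconnect() becomes a thin wrapper that takes the runtime-PM reference, calls __mei_cl_disconnect() and releases the reference with autosuspend; mei_cl_notify_request() below uses the same bracket. Distilled into a stand-alone sketch, with do_locked_work() as a hypothetical placeholder for the protected operation (this is not kernel API, only the shape of the idiom):

#include <linux/pm_runtime.h>

static int mei_rpm_bracket(struct mei_device *dev)
{
	int rets;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		/* resume request failed: drop the usage count we just took */
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	rets = do_locked_work(dev);	/* hypothetical placeholder */

	/* re-arm autosuspend instead of powering down synchronously */
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return rets;
}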
+ */ +int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request) +{ + struct mei_device *dev; + struct mei_cl_cb *cb; + enum mei_cb_file_ops fop_type; + int rets; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!dev->hbm_f_ev_supported) { + cl_dbg(dev, cl, "notifications not supported\n"); + return -EOPNOTSUPP; + } + + rets = pm_runtime_get(dev->dev); + if (rets < 0 && rets != -EINPROGRESS) { + pm_runtime_put_noidle(dev->dev); + cl_err(dev, cl, "rpm: get failed %d\n", rets); + return rets; + } + + fop_type = mei_cl_notify_req2fop(request); + cb = mei_io_cb_init(cl, fop_type, file); + if (!cb) { + rets = -ENOMEM; + goto out; + } + + if (mei_hbuf_acquire(dev)) { + if (mei_hbm_cl_notify_req(dev, cl, request)) { + rets = -ENODEV; + goto out; + } + list_add_tail(&cb->list, &dev->ctrl_rd_list.list); + } else { + list_add_tail(&cb->list, &dev->ctrl_wr_list.list); + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(cl->wait, cl->notify_en == request, + mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + mutex_lock(&dev->device_lock); + + if (cl->notify_en != request) { + mei_io_list_flush(&dev->ctrl_rd_list, cl); + mei_io_list_flush(&dev->ctrl_wr_list, cl); + if (!cl->status) + cl->status = -EFAULT; + } + + rets = cl->status; + +out: + cl_dbg(dev, cl, "rpm: autosuspend\n"); + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + mei_io_cb_free(cb); + return rets; +} + +/** + * mei_cl_notify - raise notification + * + * @cl: host client + * + * Locking: called under "dev->device_lock" lock + */ +void mei_cl_notify(struct mei_cl *cl) +{ + struct mei_device *dev; + + if (!cl || !cl->dev) + return; + + dev = cl->dev; + + if (!cl->notify_en) + return; + + cl_dbg(dev, cl, "notify event"); + cl->notify_ev = true; + wake_up_interruptible_all(&cl->ev_wait); + + if (cl->ev_async) + kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); + + mei_cl_bus_notify_event(cl); +} + +/** + * mei_cl_notify_get - get or wait for notification event + * + * @cl: host client + * @block: this request is blocking + * @notify_ev: true if notification event was received + * + * Locking: called under "dev->device_lock" lock + * + * Return: 0 on such and error otherwise. + */ +int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev) +{ + struct mei_device *dev; + int rets; + + *notify_ev = false; + + if (WARN_ON(!cl || !cl->dev)) + return -ENODEV; + + dev = cl->dev; + + if (!mei_cl_is_connected(cl)) + return -ENODEV; + + if (cl->notify_ev) + goto out; + + if (!block) + return -EAGAIN; + + mutex_unlock(&dev->device_lock); + rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev); + mutex_lock(&dev->device_lock); + + if (rets < 0) + return rets; + +out: + *notify_ev = cl->notify_ev; + cl->notify_ev = false; + return 0; +} + +/** * mei_cl_read_start - the start read client message function. 
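Besides waking poll() sleepers on cl->ev_wait, mei_cl_notify() above signals asynchronous listeners through kill_fasync(&cl->ev_async, SIGIO, POLL_PRI), which pairs with the mei_fasync() file operation added later in main.c. From user space this is the standard FASYNC setup; a minimal sketch, assuming the device is already opened, connected and has notifications enabled (error handling trimmed):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void sigio_handler(int sig)
{
	/* a FW notification arrived; consume it with IOCTL_MEI_NOTIFY_GET */
	(void)sig;
}

static int mei_arm_sigio(int fd)
{
	int flags;

	signal(SIGIO, sigio_handler);
	if (fcntl(fd, F_SETOWN, getpid()) < 0)	/* route SIGIO to this process */
		return -1;

	flags = fcntl(fd, F_GETFL);
	return fcntl(fd, F_SETFL, flags | FASYNC);	/* ends up in mei_fasync() */
}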
* * @cl: host client @@ -1356,6 +1597,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) struct mei_device *dev; struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; + int size; int rets; @@ -1367,10 +1609,10 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) dev = cl->dev; - buf = &cb->buf; + size = buf->size; - cl_dbg(dev, cl, "size=%d\n", buf->size); + cl_dbg(dev, cl, "size=%d\n", size); rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { @@ -1394,21 +1636,21 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) if (rets == 0) { cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); - rets = buf->size; + rets = size; goto out; } if (!mei_hbuf_acquire(dev)) { cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); - rets = buf->size; + rets = size; goto out; } /* Check for a maximum length */ - if (buf->size > mei_hbuf_max_len(dev)) { + if (size > mei_hbuf_max_len(dev)) { mei_hdr.length = mei_hbuf_max_len(dev); mei_hdr.msg_complete = 0; } else { - mei_hdr.length = buf->size; + mei_hdr.length = size; mei_hdr.msg_complete = 1; } @@ -1430,6 +1672,7 @@ out: else list_add_tail(&cb->list, &dev->write_list.list); + cb = NULL; if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { mutex_unlock(&dev->device_lock); @@ -1444,7 +1687,7 @@ out: } } - rets = buf->size; + rets = size; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); @@ -1486,6 +1729,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) case MEI_FOP_CONNECT: case MEI_FOP_DISCONNECT: + case MEI_FOP_NOTIFY_STOP: + case MEI_FOP_NOTIFY_START: if (waitqueue_active(&cl->wait)) wake_up(&cl->wait); @@ -1528,6 +1773,12 @@ void mei_cl_all_wakeup(struct mei_device *dev) cl_dbg(dev, cl, "Waking up writing client!\n"); wake_up_interruptible(&cl->tx_wait); } + + /* synchronized under device mutex */ + if (waitqueue_active(&cl->ev_wait)) { + cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); + wake_up_interruptible(&cl->ev_wait); + } } } diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 8d7f057f1045..1c7cad07d731 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -219,6 +219,14 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); void mei_host_client_init(struct work_struct *work); +u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop); +enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request); +int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request); +int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, + struct mei_cl_cb *cmpl_list); +int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); +void mei_cl_notify(struct mei_cl *cl); + void mei_cl_all_disconnect(struct mei_device *dev); void mei_cl_all_wakeup(struct mei_device *dev); void mei_cl_all_write_clear(struct mei_device *dev); diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index eb868341247f..4b469cf9e60f 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -154,6 +154,12 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n"); pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n", dev->hbm_f_pg_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n", + dev->hbm_f_dc_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n", + dev->hbm_f_dot_supported); + pos += scnprintf(buf 
+ pos, bufsz - pos, "\tEV: %01d\n", + dev->hbm_f_ev_supported); } pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index a4f283165a33..8eec887c8f70 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -52,6 +52,7 @@ static const char *mei_cl_conn_status_str(enum mei_cl_connect_status status) MEI_CL_CS(ALREADY_STARTED); MEI_CL_CS(OUT_OF_RESOURCES); MEI_CL_CS(MESSAGE_SMALL); + MEI_CL_CS(NOT_ALLOWED); default: return "unknown"; } #undef MEI_CL_CCS @@ -89,6 +90,7 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status) case MEI_CL_CONN_ALREADY_STARTED: return -EBUSY; case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY; case MEI_CL_CONN_MESSAGE_SMALL: return -EINVAL; + case MEI_CL_CONN_NOT_ALLOWED: return -EBUSY; default: return -EINVAL; } } @@ -299,6 +301,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; memset(enum_req, 0, len); enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; + enum_req->allow_add = dev->hbm_f_dc_supported; ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); if (ret) { @@ -344,6 +347,180 @@ static int mei_hbm_me_cl_add(struct mei_device *dev, } /** + * mei_hbm_add_cl_resp - send response to fw on client add request + * + * @dev: the device structure + * @addr: me address + * @status: response status + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status) +{ + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_add_client_response *resp; + const size_t len = sizeof(struct hbm_add_client_response); + int ret; + + dev_dbg(dev->dev, "adding client response\n"); + + resp = (struct hbm_add_client_response *)dev->wr_msg.data; + + mei_hbm_hdr(mei_hdr, len); + memset(resp, 0, sizeof(struct hbm_add_client_response)); + + resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD; + resp->me_addr = addr; + resp->status = status; + + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) + dev_err(dev->dev, "add client response write failed: ret = %d\n", + ret); + return ret; +} + +/** + * mei_hbm_fw_add_cl_req - request from the fw to add a client + * + * @dev: the device structure + * @req: add client request + * + * Return: 0 on success and < 0 on failure + */ +static int mei_hbm_fw_add_cl_req(struct mei_device *dev, + struct hbm_add_client_request *req) +{ + int ret; + u8 status = MEI_HBMS_SUCCESS; + + BUILD_BUG_ON(sizeof(struct hbm_add_client_request) != + sizeof(struct hbm_props_response)); + + ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req); + if (ret) + status = !MEI_HBMS_SUCCESS; + + return mei_hbm_add_cl_resp(dev, req->me_addr, status); +} + +/** + * mei_hbm_cl_notify_req - send notification request + * + * @dev: the device structure + * @cl: a client to disconnect from + * @start: true for start false for stop + * + * Return: 0 on success and -EIO on write failure + */ +int mei_hbm_cl_notify_req(struct mei_device *dev, + struct mei_cl *cl, u8 start) +{ + + struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct hbm_notification_request *req; + const size_t len = sizeof(struct hbm_notification_request); + int ret; + + mei_hbm_hdr(mei_hdr, len); + mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len); + + req = (struct hbm_notification_request *)dev->wr_msg.data; + req->start = start; + + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) + dev_err(dev->dev, "notify request failed: ret = 
%d\n", ret); + + return ret; +} + +/** + * notify_res_to_fop - convert notification response to the proper + * notification FOP + * + * @cmd: client notification start response command + * + * Return: MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP; + */ +static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_notification_response *rs = + (struct hbm_notification_response *)cmd; + + return mei_cl_notify_req2fop(rs->start); +} + +/** + * mei_hbm_cl_notify_start_res - update the client state according + * notify start response + * + * @dev: the device structure + * @cl: mei host client + * @cmd: client notification start response command + */ +static void mei_hbm_cl_notify_start_res(struct mei_device *dev, + struct mei_cl *cl, + struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_notification_response *rs = + (struct hbm_notification_response *)cmd; + + cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status); + + if (rs->status == MEI_HBMS_SUCCESS || + rs->status == MEI_HBMS_ALREADY_STARTED) { + cl->notify_en = true; + cl->status = 0; + } else { + cl->status = -EINVAL; + } +} + +/** + * mei_hbm_cl_notify_stop_res - update the client state according + * notify stop response + * + * @dev: the device structure + * @cl: mei host client + * @cmd: client notification stop response command + */ +static void mei_hbm_cl_notify_stop_res(struct mei_device *dev, + struct mei_cl *cl, + struct mei_hbm_cl_cmd *cmd) +{ + struct hbm_notification_response *rs = + (struct hbm_notification_response *)cmd; + + cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status); + + if (rs->status == MEI_HBMS_SUCCESS || + rs->status == MEI_HBMS_NOT_STARTED) { + cl->notify_en = false; + cl->status = 0; + } else { + /* TODO: spec is not clear yet about other possible issues */ + cl->status = -EINVAL; + } +} + +/** + * mei_hbm_cl_notify - signal notification event + * + * @dev: the device structure + * @cmd: notification client message + */ +static void mei_hbm_cl_notify(struct mei_device *dev, + struct mei_hbm_cl_cmd *cmd) +{ + struct mei_cl *cl; + + cl = mei_hbm_cl_find_by_cmd(dev, cmd); + if (cl) + mei_cl_notify(cl); +} + +/** * mei_hbm_prop_req - request property for a single client * * @dev: the device structure @@ -610,8 +787,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl, if (rs->status == MEI_CL_CONN_SUCCESS) cl->state = MEI_FILE_CONNECTED; - else + else { cl->state = MEI_FILE_DISCONNECT_REPLY; + if (rs->status == MEI_CL_CONN_NOT_FOUND) + mei_me_cl_del(dev, cl->me_cl); + } cl->status = mei_cl_conn_status_to_errno(rs->status); } @@ -654,6 +834,12 @@ static void mei_hbm_cl_res(struct mei_device *dev, case MEI_FOP_DISCONNECT: mei_hbm_cl_disconnect_res(dev, cl, rs); break; + case MEI_FOP_NOTIFY_START: + mei_hbm_cl_notify_start_res(dev, cl, rs); + break; + case MEI_FOP_NOTIFY_STOP: + mei_hbm_cl_notify_stop_res(dev, cl, rs); + break; default: return; } @@ -694,6 +880,79 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev, } /** + * mei_hbm_pg_enter_res - PG enter response received + * + * @dev: the device structure. 
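notify_res_to_fop() above is only safe because every per-client HBM response begins with the same hbm_cmd/me_addr/host_addr triple as the generic struct mei_hbm_cl_cmd it is cast from. That layout assumption could be pinned down at compile time; a hypothetical guard, not part of this patch, using the field names as declared in hbm.h and the notification structures added to hw.h further down:

#include <linux/bug.h>
#include <linux/stddef.h>
#include "hbm.h"
#include "hw.h"

static inline void mei_hbm_check_cl_cmd_layout(void)
{
	/* the generic view and the notification response must line up */
	BUILD_BUG_ON(offsetof(struct mei_hbm_cl_cmd, hbm_cmd) !=
		     offsetof(struct hbm_notification_response, hbm_cmd));
	BUILD_BUG_ON(offsetof(struct mei_hbm_cl_cmd, me_addr) !=
		     offsetof(struct hbm_notification_response, me_addr));
	BUILD_BUG_ON(offsetof(struct mei_hbm_cl_cmd, host_addr) !=
		     offsetof(struct hbm_notification_response, host_addr));
}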
+ * + * Return: 0 on success, -EPROTO on state mismatch + */ +static int mei_hbm_pg_enter_res(struct mei_device *dev) +{ + if (mei_pg_state(dev) != MEI_PG_OFF || + dev->pg_event != MEI_PG_EVENT_WAIT) { + dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n", + mei_pg_state_str(mei_pg_state(dev)), dev->pg_event); + return -EPROTO; + } + + dev->pg_event = MEI_PG_EVENT_RECEIVED; + wake_up(&dev->wait_pg); + + return 0; +} + +/** + * mei_hbm_pg_resume - process with PG resume + * + * @dev: the device structure. + */ +void mei_hbm_pg_resume(struct mei_device *dev) +{ + pm_request_resume(dev->dev); +} +EXPORT_SYMBOL_GPL(mei_hbm_pg_resume); + +/** + * mei_hbm_pg_exit_res - PG exit response received + * + * @dev: the device structure. + * + * Return: 0 on success, -EPROTO on state mismatch + */ +static int mei_hbm_pg_exit_res(struct mei_device *dev) +{ + if (mei_pg_state(dev) != MEI_PG_ON || + (dev->pg_event != MEI_PG_EVENT_WAIT && + dev->pg_event != MEI_PG_EVENT_IDLE)) { + dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n", + mei_pg_state_str(mei_pg_state(dev)), dev->pg_event); + return -EPROTO; + } + + switch (dev->pg_event) { + case MEI_PG_EVENT_WAIT: + dev->pg_event = MEI_PG_EVENT_RECEIVED; + wake_up(&dev->wait_pg); + break; + case MEI_PG_EVENT_IDLE: + /* + * If the driver is not waiting on this then + * this is HW initiated exit from PG. + * Start runtime pm resume sequence to exit from PG. + */ + dev->pg_event = MEI_PG_EVENT_RECEIVED; + mei_hbm_pg_resume(dev); + break; + default: + WARN(1, "hbm: pg exit response: unexpected pg event = %d\n", + dev->pg_event); + return -EPROTO; + } + + return 0; +} + +/** * mei_hbm_config_features - check what hbm features and commands * are supported by the fw * @@ -709,6 +968,17 @@ static void mei_hbm_config_features(struct mei_device *dev) if (dev->version.major_version == HBM_MAJOR_VERSION_PGI && dev->version.minor_version >= HBM_MINOR_VERSION_PGI) dev->hbm_f_pg_supported = 1; + + if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) + dev->hbm_f_dc_supported = 1; + + /* disconnect on connect timeout instead of link reset */ + if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) + dev->hbm_f_dot_supported = 1; + + /* Notification Event Support */ + if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) + dev->hbm_f_ev_supported = 1; } /** @@ -740,6 +1010,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) struct hbm_host_version_response *version_res; struct hbm_props_response *props_res; struct hbm_host_enum_response *enum_res; + struct hbm_add_client_request *add_cl_req; + int ret; struct mei_hbm_cl_cmd *cl_cmd; struct hbm_client_connect_request *disconnect_req; @@ -828,24 +1100,17 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) break; case MEI_PG_ISOLATION_ENTRY_RES_CMD: - dev_dbg(dev->dev, "power gate isolation entry response received\n"); - dev->pg_event = MEI_PG_EVENT_RECEIVED; - if (waitqueue_active(&dev->wait_pg)) - wake_up(&dev->wait_pg); + dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n"); + ret = mei_hbm_pg_enter_res(dev); + if (ret) + return ret; break; case MEI_PG_ISOLATION_EXIT_REQ_CMD: - dev_dbg(dev->dev, "power gate isolation exit request received\n"); - dev->pg_event = MEI_PG_EVENT_RECEIVED; - if (waitqueue_active(&dev->wait_pg)) - wake_up(&dev->wait_pg); - else - /* - * If the driver is not waiting on this then - * this is HW initiated exit from PG. - * Start runtime pm resume sequence to exit from PG. 
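mei_hbm_config_features() above turns the negotiated HBM version into individual feature bits: dynamic clients (DC), disconnect on connect timeout (DOT) and event notification (EV) all arrive with HBM 2.0, so a bare major-version compare is enough there, while PGI still needs the major+minor check. A hypothetical helper stating the general rule (equivalent to the new 2.0 checks because their minor versions are 0; not part of the patch):

static bool mei_hbm_version_ge(const struct mei_device *dev, u8 major, u8 minor)
{
	if (dev->version.major_version > major)
		return true;

	return dev->version.major_version == major &&
	       dev->version.minor_version >= minor;
}

/* usage, matching the new checks in mei_hbm_config_features() */
static void mei_hbm_config_features_sketch(struct mei_device *dev)
{
	dev->hbm_f_dc_supported =
		mei_hbm_version_ge(dev, HBM_MAJOR_VERSION_DC, HBM_MINOR_VERSION_DC);
	dev->hbm_f_dot_supported =
		mei_hbm_version_ge(dev, HBM_MAJOR_VERSION_DOT, HBM_MINOR_VERSION_DOT);
	dev->hbm_f_ev_supported =
		mei_hbm_version_ge(dev, HBM_MAJOR_VERSION_EV, HBM_MINOR_VERSION_EV);
}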
- */ - pm_request_resume(dev->dev); + dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n"); + ret = mei_hbm_pg_exit_res(dev); + if (ret) + return ret; break; case HOST_CLIENT_PROPERTIES_RES_CMD: @@ -937,6 +1202,39 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EIO; } break; + + case MEI_HBM_ADD_CLIENT_REQ_CMD: + dev_dbg(dev->dev, "hbm: add client request received\n"); + /* + * after the host receives the enum_resp + * message clients may be added or removed + */ + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && + dev->hbm_state >= MEI_HBM_STOPPED) { + dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + add_cl_req = (struct hbm_add_client_request *)mei_msg; + ret = mei_hbm_fw_add_cl_req(dev, add_cl_req); + if (ret) { + dev_err(dev->dev, "hbm: add client: failed to send response %d\n", + ret); + return -EIO; + } + dev_dbg(dev->dev, "hbm: add client request processed\n"); + break; + + case MEI_HBM_NOTIFY_RES_CMD: + dev_dbg(dev->dev, "hbm: notify response received\n"); + mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd)); + break; + + case MEI_HBM_NOTIFICATION_CMD: + dev_dbg(dev->dev, "hbm: notification\n"); + mei_hbm_cl_notify(dev, cl_cmd); + break; + default: BUG(); break; diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index 2544db7d1649..a2025a5083a3 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -54,6 +54,9 @@ int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl); int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl); bool mei_hbm_version_is_supported(struct mei_device *dev); int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd); +void mei_hbm_pg_resume(struct mei_device *dev); +int mei_hbm_cl_notify_req(struct mei_device *dev, + struct mei_cl *cl, u8 request); #endif /* _MEI_HBM_H_ */ diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 9eb7ed70ace2..a8a68acd3267 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -117,12 +117,17 @@ #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ #define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */ +#define MEI_DEV_ID_SPT 0x9D3A /* Sunrise Point */ +#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */ +#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ +#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ /* * MEI HW Section */ /* Host Firmware Status Registers in PCI Config Space */ #define PCI_CFG_HFS_1 0x40 +# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000 #define PCI_CFG_HFS_2 0x48 #define PCI_CFG_HFS_3 0x60 #define PCI_CFG_HFS_4 0x64 @@ -140,7 +145,8 @@ #define ME_CSR_HA 0xC /* H_HGC_CSR - PGI register */ #define H_HPG_CSR 0x10 - +/* H_D0I3C - D0I3 Control */ +#define H_D0I3C 0x800 /* register bits of H_CSR (Host Control Status register) */ /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ @@ -159,7 +165,14 @@ #define H_IS 0x00000002 /* Host Interrupt Enable */ #define H_IE 0x00000001 +/* Host D0I3 Interrupt Enable */ +#define H_D0I3C_IE 0x00000020 +/* Host D0I3 Interrupt Status */ +#define H_D0I3C_IS 0x00000040 +/* H_CSR masks */ +#define H_CSR_IE_MASK (H_IE | H_D0I3C_IE) +#define H_CSR_IS_MASK (H_IS | H_D0I3C_IS) /* register bits of ME_CSR_HA (ME Control Status Host Access register) */ /* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only @@ -183,8 +196,14 @@ access to ME_CBD */ #define ME_IE_HRA 0x00000001 -/* register bits - H_HPG_CSR */ 
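The new PCI_CFG_HFS_1_D0I3_MSK above is how the firmware advertises D0i3 capability: bit 31 of the HFS_1 host firmware status register in PCI config space. mei_me_hw_config() below reads it once at probe time to set hw->d0i3_supported; distilled into a stand-alone helper (a sketch with the same semantics, not kernel API):

#include <linux/pci.h>
#include "hw-me-regs.h"

static bool mei_me_fw_supports_d0i3(struct pci_dev *pdev)
{
	u32 reg = 0;

	/* HFS_1 lives in PCI config space; bit 31 flags D0i3 support */
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);

	return (reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK;
}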
-#define H_HPG_CSR_PGIHEXR 0x00000001 -#define H_HPG_CSR_PGI 0x00000002 +/* H_HPG_CSR register bits */ +#define H_HPG_CSR_PGIHEXR 0x00000001 +#define H_HPG_CSR_PGI 0x00000002 + +/* H_D0I3C register bits */ +#define H_D0I3C_CIP 0x00000001 +#define H_D0I3C_IR 0x00000002 +#define H_D0I3C_I3 0x00000004 +#define H_D0I3C_RR 0x00000008 #endif /* _MEI_HW_MEI_REGS_H_ */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 43d7101ff993..65511d39d89b 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -134,11 +134,40 @@ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg) */ static inline void mei_hcsr_set(struct mei_device *dev, u32 reg) { - reg &= ~H_IS; + reg &= ~H_CSR_IS_MASK; mei_hcsr_write(dev, reg); } /** + * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register + * + * @dev: the device structure + * + * Return: H_D0I3C register value (u32) + */ +static inline u32 mei_me_d0i3c_read(const struct mei_device *dev) +{ + u32 reg; + + reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C); + trace_mei_reg_read(dev->dev, "H_D0I3C", H_CSR, reg); + + return reg; +} + +/** + * mei_me_d0i3c_write - writes H_D0I3C register to device + * + * @dev: the device structure + * @reg: new register value + */ +static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) +{ + trace_mei_reg_write(dev->dev, "H_D0I3C", H_CSR, reg); + mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); +} + +/** * mei_me_fw_status - read fw status register from pci config space * * @dev: mei device @@ -176,12 +205,25 @@ static int mei_me_fw_status(struct mei_device *dev, */ static void mei_me_hw_config(struct mei_device *dev) { + struct pci_dev *pdev = to_pci_dev(dev->dev); struct mei_me_hw *hw = to_me_hw(dev); - u32 hcsr = mei_hcsr_read(dev); + u32 hcsr, reg; + /* Doesn't change in runtime */ + hcsr = mei_hcsr_read(dev); dev->hbuf_depth = (hcsr & H_CBD) >> 24; + reg = 0; + pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); + hw->d0i3_supported = + ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); + hw->pg_state = MEI_PG_OFF; + if (hw->d0i3_supported) { + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) + hw->pg_state = MEI_PG_ON; + } } /** @@ -208,7 +250,7 @@ static void mei_me_intr_clear(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - if ((hcsr & H_IS) == H_IS) + if (hcsr & H_CSR_IS_MASK) mei_hcsr_write(dev, hcsr); } /** @@ -220,7 +262,7 @@ static void mei_me_intr_enable(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - hcsr |= H_IE; + hcsr |= H_CSR_IE_MASK; mei_hcsr_set(dev, hcsr); } @@ -233,7 +275,7 @@ static void mei_me_intr_disable(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - hcsr &= ~H_IE; + hcsr &= ~H_CSR_IE_MASK; mei_hcsr_set(dev, hcsr); } @@ -253,57 +295,6 @@ static void mei_me_hw_reset_release(struct mei_device *dev) /* complete this write before we set host ready on another CPU */ mmiowb(); } -/** - * mei_me_hw_reset - resets fw via mei csr register. - * - * @dev: the device structure - * @intr_enable: if interrupt should be enabled after reset. - * - * Return: always 0 - */ -static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) -{ - u32 hcsr = mei_hcsr_read(dev); - - /* H_RST may be found lit before reset is started, - * for example if preceding reset flow hasn't completed. - * In that case asserting H_RST will be ignored, therefore - * we need to clean H_RST bit to start a successful reset sequence. 
- */ - if ((hcsr & H_RST) == H_RST) { - dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); - hcsr &= ~H_RST; - mei_hcsr_set(dev, hcsr); - hcsr = mei_hcsr_read(dev); - } - - hcsr |= H_RST | H_IG | H_IS; - - if (intr_enable) - hcsr |= H_IE; - else - hcsr &= ~H_IE; - - dev->recvd_hw_ready = false; - mei_hcsr_write(dev, hcsr); - - /* - * Host reads the H_CSR once to ensure that the - * posted write to H_CSR completes. - */ - hcsr = mei_hcsr_read(dev); - - if ((hcsr & H_RST) == 0) - dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); - - if ((hcsr & H_RDY) == H_RDY) - dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); - - if (intr_enable == false) - mei_me_hw_reset_release(dev); - - return 0; -} /** * mei_me_host_set_ready - enable device @@ -314,7 +305,7 @@ static void mei_me_host_set_ready(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - hcsr |= H_IE | H_IG | H_RDY; + hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; mei_hcsr_set(dev, hcsr); } @@ -601,13 +592,13 @@ static void mei_me_pg_unset(struct mei_device *dev) } /** - * mei_me_pg_enter_sync - perform pg entry procedure + * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure * * @dev: the device structure * * Return: 0 on success an error code otherwise */ -int mei_me_pg_enter_sync(struct mei_device *dev) +static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); @@ -638,13 +629,13 @@ int mei_me_pg_enter_sync(struct mei_device *dev) } /** - * mei_me_pg_exit_sync - perform pg exit procedure + * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure * * @dev: the device structure * * Return: 0 on success an error code otherwise */ -int mei_me_pg_exit_sync(struct mei_device *dev) +static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); @@ -712,8 +703,12 @@ static bool mei_me_pg_in_transition(struct mei_device *dev) */ static bool mei_me_pg_is_enabled(struct mei_device *dev) { + struct mei_me_hw *hw = to_me_hw(dev); u32 reg = mei_me_mecsr_read(dev); + if (hw->d0i3_supported) + return true; + if ((reg & ME_PGIC_HRA) == 0) goto notsupported; @@ -723,7 +718,8 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev) return true; notsupported: - dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", + dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n", + hw->d0i3_supported, !!(reg & ME_PGIC_HRA), dev->version.major_version, dev->version.minor_version, @@ -734,11 +730,211 @@ notsupported: } /** - * mei_me_pg_intr - perform pg processing in interrupt thread handler + * mei_me_d0i3_set - write d0i3 register bit on mei device. * * @dev: the device structure + * @intr: ask for interrupt + * + * Return: D0I3C register value */ -static void mei_me_pg_intr(struct mei_device *dev) +static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr) +{ + u32 reg = mei_me_d0i3c_read(dev); + + reg |= H_D0I3C_I3; + if (intr) + reg |= H_D0I3C_IR; + else + reg &= ~H_D0I3C_IR; + mei_me_d0i3c_write(dev, reg); + /* read it to ensure HW consistency */ + reg = mei_me_d0i3c_read(dev); + return reg; +} + +/** + * mei_me_d0i3_unset - clean d0i3 register bit on mei device. 
+ * + * @dev: the device structure + * + * Return: D0I3C register value + */ +static u32 mei_me_d0i3_unset(struct mei_device *dev) +{ + u32 reg = mei_me_d0i3c_read(dev); + + reg &= ~H_D0I3C_I3; + reg |= H_D0I3C_IR; + mei_me_d0i3c_write(dev, reg); + /* read it to ensure HW consistency */ + reg = mei_me_d0i3c_read(dev); + return reg; +} + +/** + * mei_me_d0i3_enter_sync - perform d0i3 entry procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_d0i3_enter_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); + unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + int ret; + u32 reg; + + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) { + /* we are in d0i3, nothing to do */ + dev_dbg(dev->dev, "d0i3 set not needed\n"); + ret = 0; + goto on; + } + + /* PGI entry procedure */ + dev->pg_event = MEI_PG_EVENT_WAIT; + + ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); + if (ret) + /* FIXME: should we reset here? */ + goto out; + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { + ret = -ETIME; + goto out; + } + /* end PGI entry procedure */ + + dev->pg_event = MEI_PG_EVENT_INTR_WAIT; + + reg = mei_me_d0i3_set(dev, true); + if (!(reg & H_D0I3C_CIP)) { + dev_dbg(dev->dev, "d0i3 enter wait not needed\n"); + ret = 0; + goto on; + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { + reg = mei_me_d0i3c_read(dev); + if (!(reg & H_D0I3C_I3)) { + ret = -ETIME; + goto out; + } + } + + ret = 0; +on: + hw->pg_state = MEI_PG_ON; +out: + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret); + return ret; +} + +/** + * mei_me_d0i3_enter - perform d0i3 entry procedure + * no hbm PG handshake + * no waiting for confirmation; runs with interrupts + * disabled + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_d0i3_enter(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 reg; + + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) { + /* we are in d0i3, nothing to do */ + dev_dbg(dev->dev, "already d0i3 : set not needed\n"); + goto on; + } + + mei_me_d0i3_set(dev, false); +on: + hw->pg_state = MEI_PG_ON; + dev->pg_event = MEI_PG_EVENT_IDLE; + dev_dbg(dev->dev, "d0i3 enter\n"); + return 0; +} + +/** + * mei_me_d0i3_exit_sync - perform d0i3 exit procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_d0i3_exit_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); + int ret; + u32 reg; + + dev->pg_event = MEI_PG_EVENT_INTR_WAIT; + + reg = mei_me_d0i3c_read(dev); + if (!(reg & H_D0I3C_I3)) { + /* we are not in d0i3, nothing to do */ + dev_dbg(dev->dev, "d0i3 exit not needed\n"); + ret = 0; + goto off; + } + + reg = mei_me_d0i3_unset(dev); + if (!(reg & H_D0I3C_CIP)) { + dev_dbg(dev->dev, "d0i3 exit wait not needed\n"); + ret = 0; + goto off; + } + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_pg, + dev->pg_event == 
MEI_PG_EVENT_INTR_RECEIVED, timeout); + mutex_lock(&dev->device_lock); + + if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { + reg = mei_me_d0i3c_read(dev); + if (reg & H_D0I3C_I3) { + ret = -ETIME; + goto out; + } + } + + ret = 0; +off: + hw->pg_state = MEI_PG_OFF; +out: + dev->pg_event = MEI_PG_EVENT_IDLE; + + dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret); + return ret; +} + +/** + * mei_me_pg_legacy_intr - perform legacy pg processing + * in interrupt thread handler + * + * @dev: the device structure + */ +static void mei_me_pg_legacy_intr(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); @@ -752,6 +948,162 @@ static void mei_me_pg_intr(struct mei_device *dev) } /** + * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler + * + * @dev: the device structure + */ +static void mei_me_d0i3_intr(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT && + (hw->intr_source & H_D0I3C_IS)) { + dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; + if (hw->pg_state == MEI_PG_ON) { + hw->pg_state = MEI_PG_OFF; + if (dev->hbm_state != MEI_HBM_IDLE) { + /* + * force H_RDY because it could be + * wiped off during PG + */ + dev_dbg(dev->dev, "d0i3 set host ready\n"); + mei_me_host_set_ready(dev); + } + } else { + hw->pg_state = MEI_PG_ON; + } + + wake_up(&dev->wait_pg); + } + + if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) { + /* + * HW sent some data and we are in D0i3, so + * we got here because of HW initiated exit from D0i3. + * Start runtime pm resume sequence to exit low power state. + */ + dev_dbg(dev->dev, "d0i3 want resume\n"); + mei_hbm_pg_resume(dev); + } +} + +/** + * mei_me_pg_intr - perform pg processing in interrupt thread handler + * + * @dev: the device structure + */ +static void mei_me_pg_intr(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (hw->d0i3_supported) + mei_me_d0i3_intr(dev); + else + mei_me_pg_legacy_intr(dev); +} + +/** + * mei_me_pg_enter_sync - perform runtime pm entry procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +int mei_me_pg_enter_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (hw->d0i3_supported) + return mei_me_d0i3_enter_sync(dev); + else + return mei_me_pg_legacy_enter_sync(dev); +} + +/** + * mei_me_pg_exit_sync - perform runtime pm exit procedure + * + * @dev: the device structure + * + * Return: 0 on success an error code otherwise + */ +int mei_me_pg_exit_sync(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + + if (hw->d0i3_supported) + return mei_me_d0i3_exit_sync(dev); + else + return mei_me_pg_legacy_exit_sync(dev); +} + +/** + * mei_me_hw_reset - resets fw via mei csr register. + * + * @dev: the device structure + * @intr_enable: if interrupt should be enabled after reset. + * + * Return: 0 on success an error code otherwise + */ +static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_me_hw *hw = to_me_hw(dev); + int ret; + u32 hcsr; + + if (intr_enable) { + mei_me_intr_enable(dev); + if (hw->d0i3_supported) { + ret = mei_me_d0i3_exit_sync(dev); + if (ret) + return ret; + } + } + + hcsr = mei_hcsr_read(dev); + /* H_RST may be found lit before reset is started, + * for example if preceding reset flow hasn't completed. + * In that case asserting H_RST will be ignored, therefore + * we need to clean H_RST bit to start a successful reset sequence. 
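The mei_me_pg_enter_sync()/mei_me_pg_exit_sync() wrappers above keep their old names and callers (the runtime-PM suspend/resume paths) and only pick a back-end based on hw->d0i3_supported, so the power-gating policy code never needs to know whether the platform uses legacy PGI or D0i3. The shape of that dispatch, condensed into one illustrative function (not part of the patch):

static int mei_me_pg_sync(struct mei_device *dev, bool enter)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	/* D0i3-capable hardware uses the D0I3C register protocol,
	 * everything else keeps the legacy PGI HBM handshake
	 */
	if (hw->d0i3_supported)
		return enter ? mei_me_d0i3_enter_sync(dev)
			     : mei_me_d0i3_exit_sync(dev);

	return enter ? mei_me_pg_legacy_enter_sync(dev)
		     : mei_me_pg_legacy_exit_sync(dev);
}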
+ */ + if ((hcsr & H_RST) == H_RST) { + dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); + hcsr &= ~H_RST; + mei_hcsr_set(dev, hcsr); + hcsr = mei_hcsr_read(dev); + } + + hcsr |= H_RST | H_IG | H_CSR_IS_MASK; + + if (!intr_enable) + hcsr &= ~H_CSR_IE_MASK; + + dev->recvd_hw_ready = false; + mei_hcsr_write(dev, hcsr); + + /* + * Host reads the H_CSR once to ensure that the + * posted write to H_CSR completes. + */ + hcsr = mei_hcsr_read(dev); + + if ((hcsr & H_RST) == 0) + dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); + + if ((hcsr & H_RDY) == H_RDY) + dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); + + if (!intr_enable) { + mei_me_hw_reset_release(dev); + if (hw->d0i3_supported) { + ret = mei_me_d0i3_enter(dev); + if (ret) + return ret; + } + } + return 0; +} + +/** * mei_me_irq_quick_handler - The ISR of the MEI device * * @irq: The irq number @@ -759,16 +1111,20 @@ static void mei_me_pg_intr(struct mei_device *dev) * * Return: irqreturn_t */ - irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) { - struct mei_device *dev = (struct mei_device *) dev_id; - u32 hcsr = mei_hcsr_read(dev); + struct mei_device *dev = (struct mei_device *)dev_id; + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr; - if ((hcsr & H_IS) != H_IS) + hcsr = mei_hcsr_read(dev); + if (!(hcsr & H_CSR_IS_MASK)) return IRQ_NONE; - /* clear H_IS bit in H_CSR */ + hw->intr_source = hcsr & H_CSR_IS_MASK; + dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source); + + /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */ mei_hcsr_write(dev, hcsr); return IRQ_WAKE_THREAD; @@ -796,11 +1152,6 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) mutex_lock(&dev->device_lock); mei_io_list_init(&complete_list); - /* Ack the interrupt here - * In case of MSI we don't go through the quick handler */ - if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) - mei_clear_interrupts(dev); - /* check if ME wants a reset */ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { dev_warn(dev->dev, "FW not ready: resetting.\n"); diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 6022d52af6f6..2ee14dc1b2ea 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -50,13 +50,17 @@ struct mei_cfg { * struct mei_me_hw - me hw specific data * * @cfg: per device generation config and ops - * @mem_addr: io memory address - * @pg_state: power gating state + * @mem_addr: io memory address + * @intr_source: interrupt source + * @pg_state: power gating state + * @d0i3_supported: di03 support */ struct mei_me_hw { const struct mei_cfg *cfg; void __iomem *mem_addr; + u32 intr_source; enum mei_pg_state pg_state; + bool d0i3_supported; }; #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 16fef6dc4dd7..4cebde85924f 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -31,14 +31,15 @@ #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ -#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ -#define MEI_HBM_TIMEOUT 1 /* 1 second */ +#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ +#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ +#define MEI_HBM_TIMEOUT 1 /* 1 second */ /* * MEI Version */ -#define HBM_MINOR_VERSION 1 -#define HBM_MAJOR_VERSION 1 +#define HBM_MINOR_VERSION 0 +#define HBM_MAJOR_VERSION 2 /* * MEI version with PGI support @@ -46,6 +47,24 @@ #define HBM_MINOR_VERSION_PGI 1 #define 
HBM_MAJOR_VERSION_PGI 1 +/* + * MEI version with Dynamic clients support + */ +#define HBM_MINOR_VERSION_DC 0 +#define HBM_MAJOR_VERSION_DC 2 + +/* + * MEI version with disconnect on connection timeout support + */ +#define HBM_MINOR_VERSION_DOT 0 +#define HBM_MAJOR_VERSION_DOT 2 + +/* + * MEI version with notifcation support + */ +#define HBM_MINOR_VERSION_EV 0 +#define HBM_MAJOR_VERSION_EV 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ @@ -81,6 +100,13 @@ #define MEI_PG_ISOLATION_EXIT_REQ_CMD 0x0b #define MEI_PG_ISOLATION_EXIT_RES_CMD 0x8b +#define MEI_HBM_ADD_CLIENT_REQ_CMD 0x0f +#define MEI_HBM_ADD_CLIENT_RES_CMD 0x8f + +#define MEI_HBM_NOTIFY_REQ_CMD 0x10 +#define MEI_HBM_NOTIFY_RES_CMD 0x90 +#define MEI_HBM_NOTIFICATION_CMD 0x11 + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -136,6 +162,7 @@ enum mei_cl_connect_status { MEI_CL_CONN_ALREADY_STARTED = MEI_HBMS_ALREADY_EXISTS, MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED, MEI_CL_CONN_MESSAGE_SMALL = MEI_HBMS_INVALID_PARAMETER, + MEI_CL_CONN_NOT_ALLOWED = MEI_HBMS_NOT_ALLOWED, }; /* @@ -213,9 +240,17 @@ struct hbm_me_stop_request { u8 reserved[2]; } __packed; +/** + * struct hbm_host_enum_request - enumeration request from host to fw + * + * @hbm_cmd: bus message command header + * @allow_add: allow dynamic clients add HBM version >= 2.0 + * @reserved: reserved + */ struct hbm_host_enum_request { u8 hbm_cmd; - u8 reserved[3]; + u8 allow_add; + u8 reserved[2]; } __packed; struct hbm_host_enum_response { @@ -248,6 +283,38 @@ struct hbm_props_response { } __packed; /** + * struct hbm_add_client_request - request to add a client + * might be sent by fw after enumeration has already completed + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @reserved: reserved + * @client_properties: client properties + */ +struct hbm_add_client_request { + u8 hbm_cmd; + u8 me_addr; + u8 reserved[2]; + struct mei_client_properties client_properties; +} __packed; + +/** + * struct hbm_add_client_response - response to add a client + * sent by the host to report client addition status to fw + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @status: if HBMS_SUCCESS then the client can now accept connections. + * @reserved: reserved + */ +struct hbm_add_client_response { + u8 hbm_cmd; + u8 me_addr; + u8 status; + u8 reserved[1]; +} __packed; + +/** * struct hbm_power_gate - power gate request/response * * @hbm_cmd: bus message command header @@ -298,5 +365,62 @@ struct hbm_flow_control { u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH]; } __packed; +#define MEI_HBM_NOTIFICATION_START 1 +#define MEI_HBM_NOTIFICATION_STOP 0 +/** + * struct hbm_notification_request - start/stop notification request + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @start: start = 1 or stop = 0 asynchronous notifications + */ +struct hbm_notification_request { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 start; +} __packed; + +/** + * struct hbm_notification_response - start/stop notification response + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: - address of the client in the driver + * @status: (mei_hbm_status) response status for the request + * - MEI_HBMS_SUCCESS: successful stop/start + * - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found. 
+ * - MEI_HBMS_ALREADY_STARTED: for start requests for a previously + * started notification. + * - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom + * asynchronous notifications are currently disabled. + * + * @start: start = 1 or stop = 0 asynchronous notifications + * @reserved: reserved + */ +struct hbm_notification_response { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 status; + u8 start; + u8 reserved[3]; +} __packed; + +/** + * struct hbm_notification - notification event + * + * @hbm_cmd: bus message command header + * @me_addr: address of the client in ME + * @host_addr: address of the client in the driver + * @reserved: reserved for alignment + */ +struct hbm_notification { + u8 hbm_cmd; + u8 me_addr; + u8 host_addr; + u8 reserved[1]; +} __packed; #endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 00c3865ca3b1..e374661652cd 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -331,7 +331,7 @@ void mei_stop(struct mei_device *dev) mei_cancel_work(dev); - mei_nfc_host_exit(dev); + mei_cl_bus_remove_devices(dev); mutex_lock(&dev->device_lock); @@ -390,6 +390,7 @@ void mei_device_init(struct mei_device *dev, INIT_LIST_HEAD(&dev->me_clients); mutex_init(&dev->device_lock); init_rwsem(&dev->me_clients_rwsem); + mutex_init(&dev->cl_bus_lock); init_waitqueue_head(&dev->wait_hw_ready); init_waitqueue_head(&dev->wait_pg); init_waitqueue_head(&dev->wait_hbm_start); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 3f3405269c39..c418d7888994 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -403,6 +403,13 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) if (ret) return ret; break; + + case MEI_FOP_NOTIFY_START: + case MEI_FOP_NOTIFY_STOP: + ret = mei_cl_irq_notify(cl, cb, cmpl_list); + if (ret) + return ret; + break; default: BUG(); } @@ -424,6 +431,24 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) EXPORT_SYMBOL_GPL(mei_irq_write_handler); +/** + * mei_connect_timeout - connect/disconnect timeouts + * + * @cl: host client + */ +static void mei_connect_timeout(struct mei_cl *cl) +{ + struct mei_device *dev = cl->dev; + + if (cl->state == MEI_FILE_CONNECTING) { + if (dev->hbm_f_dot_supported) { + cl->state = MEI_FILE_DISCONNECT_REQUIRED; + wake_up(&cl->wait); + return; + } + } + mei_reset(dev); +} /** * mei_timer - timer function. 
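The notification messages added to hw.h above are all fixed-size packed structures (4-byte request, 8-byte response, 4-byte event), which is what allows the HBM code to cast raw message buffers directly. A hypothetical compile-time guard for those wire sizes, derived from the struct definitions above and not part of the patch:

#include <linux/bug.h>
#include "hw.h"

static inline void mei_hbm_notify_check_wire_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct hbm_notification_request)  != 4);
	BUILD_BUG_ON(sizeof(struct hbm_notification_response) != 8);
	BUILD_BUG_ON(sizeof(struct hbm_notification)           != 4);
}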
@@ -464,7 +489,7 @@ void mei_timer(struct work_struct *work) if (cl->timer_count) { if (--cl->timer_count == 0) { dev_err(dev->dev, "timer: connect/disconnect timeout.\n"); - mei_reset(dev); + mei_connect_timeout(cl); goto out; } } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index e9513d651cd3..b2f2486b3d75 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -446,6 +446,45 @@ end: } /** + * mei_ioctl_client_notify_request - + * propagate event notification request to client + * + * @file: pointer to file structure + * @request: 0 - disable, 1 - enable + * + * Return: 0 on success , <0 on error + */ +static int mei_ioctl_client_notify_request(struct file *file, u32 request) +{ + struct mei_cl *cl = file->private_data; + + return mei_cl_notify_request(cl, file, request); +} + +/** + * mei_ioctl_client_notify_get - wait for notification request + * + * @file: pointer to file structure + * @notify_get: 0 - disable, 1 - enable + * + * Return: 0 on success , <0 on error + */ +static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get) +{ + struct mei_cl *cl = file->private_data; + bool notify_ev; + bool block = (file->f_flags & O_NONBLOCK) == 0; + int rets; + + rets = mei_cl_notify_get(cl, block, ¬ify_ev); + if (rets) + return rets; + + *notify_get = notify_ev ? 1 : 0; + return 0; +} + +/** * mei_ioctl - the IOCTL function * * @file: pointer to file structure @@ -459,6 +498,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) struct mei_device *dev; struct mei_cl *cl = file->private_data; struct mei_connect_client_data connect_data; + u32 notify_get, notify_req; int rets; @@ -499,6 +539,33 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; + case IOCTL_MEI_NOTIFY_SET: + dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n"); + if (copy_from_user(¬ify_req, + (char __user *)data, sizeof(notify_req))) { + dev_dbg(dev->dev, "failed to copy data from userland\n"); + rets = -EFAULT; + goto out; + } + rets = mei_ioctl_client_notify_request(file, notify_req); + break; + + case IOCTL_MEI_NOTIFY_GET: + dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n"); + rets = mei_ioctl_client_notify_get(file, ¬ify_get); + if (rets) + goto out; + + dev_dbg(dev->dev, "copy connect data to user\n"); + if (copy_to_user((char __user *)data, + ¬ify_get, sizeof(notify_get))) { + dev_dbg(dev->dev, "failed to copy data to userland\n"); + rets = -EFAULT; + goto out; + + } + break; + default: dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd); rets = -ENOIOCTLCMD; @@ -541,6 +608,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait) struct mei_cl *cl = file->private_data; struct mei_device *dev; unsigned int mask = 0; + bool notify_en; if (WARN_ON(!cl || !cl->dev)) return POLLERR; @@ -549,6 +617,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait) mutex_lock(&dev->device_lock); + notify_en = cl->notify_en && (req_events & POLLPRI); if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) { @@ -561,6 +630,12 @@ static unsigned int mei_poll(struct file *file, poll_table *wait) goto out; } + if (notify_en) { + poll_wait(file, &cl->ev_wait, wait); + if (cl->notify_ev) + mask |= POLLPRI; + } + if (req_events & (POLLIN | POLLRDNORM)) { poll_wait(file, &cl->rx_wait, wait); @@ -576,6 +651,26 @@ out: } /** + * mei_fasync - asynchronous io support + * + * @fd: file descriptor + * @file: pointer to file structure + * @band: band bitmap + * + * Return: poll mask + */ +static int 
mei_fasync(int fd, struct file *file, int band) +{ + + struct mei_cl *cl = file->private_data; + + if (!mei_cl_is_connected(cl)) + return POLLERR; + + return fasync_helper(fd, file, band, &cl->ev_async); +} + +/** * fw_status_show - mei device attribute show method * * @device: device pointer @@ -627,6 +722,7 @@ static const struct file_operations mei_fops = { .release = mei_release, .write = mei_write, .poll = mei_poll, + .fasync = mei_fasync, .llseek = no_llseek }; diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 453f6a333b42..e25ee16c658e 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -89,6 +89,7 @@ enum file_state { MEI_FILE_CONNECTED, MEI_FILE_DISCONNECTING, MEI_FILE_DISCONNECT_REPLY, + MEI_FILE_DISCONNECT_REQUIRED, MEI_FILE_DISCONNECTED, }; @@ -135,6 +136,8 @@ enum mei_wd_states { * @MEI_FOP_CONNECT: connect * @MEI_FOP_DISCONNECT: disconnect * @MEI_FOP_DISCONNECT_RSP: disconnect response + * @MEI_FOP_NOTIFY_START: start notification + * @MEI_FOP_NOTIFY_STOP: stop notification */ enum mei_cb_file_ops { MEI_FOP_READ = 0, @@ -142,6 +145,8 @@ enum mei_cb_file_ops { MEI_FOP_CONNECT, MEI_FOP_DISCONNECT, MEI_FOP_DISCONNECT_RSP, + MEI_FOP_NOTIFY_START, + MEI_FOP_NOTIFY_STOP, }; /* @@ -178,7 +183,7 @@ struct mei_fw_status { * @client_id: me client id * @mei_flow_ctrl_creds: flow control credits * @connect_count: number connections to this client - * @reserved: reserved + * @bus_added: added to bus */ struct mei_me_client { struct list_head list; @@ -187,7 +192,7 @@ struct mei_me_client { u8 client_id; u8 mei_flow_ctrl_creds; u8 connect_count; - u8 reserved; + u8 bus_added; }; @@ -230,18 +235,21 @@ struct mei_cl_cb { * @tx_wait: wait queue for tx completion * @rx_wait: wait queue for rx completion * @wait: wait queue for management operation + * @ev_wait: notification wait queue + * @ev_async: event async notification * @status: connection status * @me_cl: fw client connected * @host_client_id: host id * @mei_flow_ctrl_creds: transmit flow credentials * @timer_count: watchdog timer for operation completion * @reserved: reserved for alignment + * @notify_en: notification - enabled/disabled + * @notify_ev: pending notification event * @writing_state: state of the tx * @rd_pending: pending read credits * @rd_completed: completed read * - * @device: device on the mei client bus - * @device_link: link to bus clients + * @cldev: device on the mei client bus */ struct mei_cl { struct list_head link; @@ -250,19 +258,21 @@ struct mei_cl { wait_queue_head_t tx_wait; wait_queue_head_t rx_wait; wait_queue_head_t wait; + wait_queue_head_t ev_wait; + struct fasync_struct *ev_async; int status; struct mei_me_client *me_cl; u8 host_client_id; u8 mei_flow_ctrl_creds; u8 timer_count; u8 reserved; + u8 notify_en; + u8 notify_ev; enum mei_file_transaction_states writing_state; struct list_head rd_pending; struct list_head rd_completed; - /* MEI CL bus data */ - struct mei_cl_device *device; - struct list_head device_link; + struct mei_cl_device *cldev; }; /** struct mei_hw_ops @@ -329,21 +339,16 @@ struct mei_hw_ops { }; /* MEI bus API*/ - -struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, - struct mei_me_client *me_cl, - struct mei_cl *cl, - char *name); -void mei_cl_remove_device(struct mei_cl_device *device); - +void mei_cl_bus_rescan(struct mei_device *bus); +void mei_cl_dev_fixup(struct mei_cl_device *dev); ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, bool blocking); ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, 
size_t length); void mei_cl_bus_rx_event(struct mei_cl *cl); -void mei_cl_bus_remove_devices(struct mei_device *dev); +void mei_cl_bus_notify_event(struct mei_cl *cl); +void mei_cl_bus_remove_devices(struct mei_device *bus); int mei_cl_bus_init(void); void mei_cl_bus_exit(void); -struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid); /** * enum mei_pg_event - power gating transition events @@ -416,7 +421,10 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @wr_msg : the buffer for hbm control messages * * @version : HBM protocol version in use - * @hbm_f_pg_supported : hbm feature pgi protocol + * @hbm_f_pg_supported : hbm feature pgi protocol + * @hbm_f_dc_supported : hbm feature dynamic clients + * @hbm_f_dot_supported : hbm feature disconnect on timeout + * @hbm_f_ev_supported : hbm feature event notification * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients @@ -447,6 +455,7 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @reset_work : work item for the device reset * * @device_list : mei client bus list + * @cl_bus_lock : client bus list lock * * @dbgfs_dir : debugfs mei root directory * @@ -509,6 +518,9 @@ struct mei_device { struct hbm_version version; unsigned int hbm_f_pg_supported:1; + unsigned int hbm_f_dc_supported:1; + unsigned int hbm_f_dot_supported:1; + unsigned int hbm_f_ev_supported:1; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; @@ -543,6 +555,7 @@ struct mei_device { /* List of bus devices */ struct list_head device_list; + struct mutex cl_bus_lock; #if IS_ENABLED(CONFIG_DEBUG_FS) struct dentry *dbgfs_dir; diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c deleted file mode 100644 index 290ef3037437..000000000000 --- a/drivers/misc/mei/nfc.c +++ /dev/null @@ -1,415 +0,0 @@ -/* - * - * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2013, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/device.h> -#include <linux/slab.h> - -#include <linux/mei_cl_bus.h> - -#include "mei_dev.h" -#include "client.h" - -struct mei_nfc_cmd { - u8 command; - u8 status; - u16 req_id; - u32 reserved; - u16 data_size; - u8 sub_command; - u8 data[]; -} __packed; - -struct mei_nfc_reply { - u8 command; - u8 status; - u16 req_id; - u32 reserved; - u16 data_size; - u8 sub_command; - u8 reply_status; - u8 data[]; -} __packed; - -struct mei_nfc_if_version { - u8 radio_version_sw[3]; - u8 reserved[3]; - u8 radio_version_hw[3]; - u8 i2c_addr; - u8 fw_ivn; - u8 vendor_id; - u8 radio_type; -} __packed; - -struct mei_nfc_connect { - u8 fw_ivn; - u8 vendor_id; -} __packed; - -struct mei_nfc_connect_resp { - u8 fw_ivn; - u8 vendor_id; - u16 me_major; - u16 me_minor; - u16 me_hotfix; - u16 me_build; -} __packed; - -struct mei_nfc_hci_hdr { - u8 cmd; - u8 status; - u16 req_id; - u32 reserved; - u16 data_size; -} __packed; - -#define MEI_NFC_CMD_MAINTENANCE 0x00 -#define MEI_NFC_CMD_HCI_SEND 0x01 -#define MEI_NFC_CMD_HCI_RECV 0x02 - -#define MEI_NFC_SUBCMD_CONNECT 0x00 -#define MEI_NFC_SUBCMD_IF_VERSION 0x01 - -#define MEI_NFC_HEADER_SIZE 10 - -/** - * struct mei_nfc_dev - NFC mei device - * - * @me_cl: NFC me client - * @cl: NFC host client - * @cl_info: NFC info host client - * @init_work: perform connection to the info client - * @fw_ivn: NFC Interface Version Number - * @vendor_id: NFC manufacturer ID - * @radio_type: NFC radio type - * @bus_name: bus name - * - */ -struct mei_nfc_dev { - struct mei_me_client *me_cl; - struct mei_cl *cl; - struct mei_cl *cl_info; - struct work_struct init_work; - u8 fw_ivn; - u8 vendor_id; - u8 radio_type; - char *bus_name; -}; - -/* UUIDs for NFC F/W clients */ -const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, - 0x94, 0xd4, 0x50, 0x26, - 0x67, 0x23, 0x77, 0x5c); - -static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d, - 0x48, 0xa4, 0xef, 0xab, - 0xba, 0x8a, 0x12, 0x06); - -/* Vendors */ -#define MEI_NFC_VENDOR_INSIDE 0x00 -#define MEI_NFC_VENDOR_NXP 0x01 - -/* Radio types */ -#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00 -#define MEI_NFC_VENDOR_NXP_PN544 0x01 - -static void mei_nfc_free(struct mei_nfc_dev *ndev) -{ - if (!ndev) - return; - - if (ndev->cl) { - list_del(&ndev->cl->device_link); - mei_cl_unlink(ndev->cl); - kfree(ndev->cl); - } - - if (ndev->cl_info) { - list_del(&ndev->cl_info->device_link); - mei_cl_unlink(ndev->cl_info); - kfree(ndev->cl_info); - } - - mei_me_cl_put(ndev->me_cl); - kfree(ndev); -} - -static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev) -{ - struct mei_device *dev; - - if (!ndev->cl) - return -ENODEV; - - dev = ndev->cl->dev; - - switch (ndev->vendor_id) { - case MEI_NFC_VENDOR_INSIDE: - switch (ndev->radio_type) { - case MEI_NFC_VENDOR_INSIDE_UREAD: - ndev->bus_name = "microread"; - return 0; - - default: - dev_err(dev->dev, "Unknown radio type 0x%x\n", - ndev->radio_type); - - return -EINVAL; - } - - case MEI_NFC_VENDOR_NXP: - switch (ndev->radio_type) { - case MEI_NFC_VENDOR_NXP_PN544: - ndev->bus_name = "pn544"; - return 0; - default: - dev_err(dev->dev, "Unknown radio type 0x%x\n", - ndev->radio_type); - - return -EINVAL; - } - - default: - dev_err(dev->dev, "Unknown vendor ID 0x%x\n", - ndev->vendor_id); - - return -EINVAL; - } - - return 0; -} - -static int mei_nfc_if_version(struct mei_nfc_dev *ndev) -{ - struct mei_device *dev; - struct mei_cl *cl; - - 
struct mei_nfc_cmd cmd; - struct mei_nfc_reply *reply = NULL; - struct mei_nfc_if_version *version; - size_t if_version_length; - int bytes_recv, ret; - - cl = ndev->cl_info; - dev = cl->dev; - - memset(&cmd, 0, sizeof(struct mei_nfc_cmd)); - cmd.command = MEI_NFC_CMD_MAINTENANCE; - cmd.data_size = 1; - cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION; - - ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1); - if (ret < 0) { - dev_err(dev->dev, "Could not send IF version cmd\n"); - return ret; - } - - /* to be sure on the stack we alloc memory */ - if_version_length = sizeof(struct mei_nfc_reply) + - sizeof(struct mei_nfc_if_version); - - reply = kzalloc(if_version_length, GFP_KERNEL); - if (!reply) - return -ENOMEM; - - bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); - if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { - dev_err(dev->dev, "Could not read IF version\n"); - ret = -EIO; - goto err; - } - - version = (struct mei_nfc_if_version *)reply->data; - - ndev->fw_ivn = version->fw_ivn; - ndev->vendor_id = version->vendor_id; - ndev->radio_type = version->radio_type; - -err: - kfree(reply); - return ret; -} - -static void mei_nfc_init(struct work_struct *work) -{ - struct mei_device *dev; - struct mei_cl_device *cldev; - struct mei_nfc_dev *ndev; - struct mei_cl *cl_info; - struct mei_me_client *me_cl_info; - - ndev = container_of(work, struct mei_nfc_dev, init_work); - - cl_info = ndev->cl_info; - dev = cl_info->dev; - - mutex_lock(&dev->device_lock); - - /* check for valid client id */ - me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); - if (!me_cl_info) { - mutex_unlock(&dev->device_lock); - dev_info(dev->dev, "nfc: failed to find the info client\n"); - goto err; - } - - if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) { - mei_me_cl_put(me_cl_info); - mutex_unlock(&dev->device_lock); - dev_err(dev->dev, "Could not connect to the NFC INFO ME client"); - - goto err; - } - mei_me_cl_put(me_cl_info); - mutex_unlock(&dev->device_lock); - - if (mei_nfc_if_version(ndev) < 0) { - dev_err(dev->dev, "Could not get the NFC interface version"); - - goto err; - } - - dev_info(dev->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n", - ndev->fw_ivn, ndev->vendor_id, ndev->radio_type); - - mutex_lock(&dev->device_lock); - - if (mei_cl_disconnect(cl_info) < 0) { - mutex_unlock(&dev->device_lock); - dev_err(dev->dev, "Could not disconnect the NFC INFO ME client"); - - goto err; - } - - mutex_unlock(&dev->device_lock); - - if (mei_nfc_build_bus_name(ndev) < 0) { - dev_err(dev->dev, "Could not build the bus ID name\n"); - return; - } - - cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl, - ndev->bus_name); - if (!cldev) { - dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n"); - - goto err; - } - - cldev->priv_data = ndev; - - - return; - -err: - mutex_lock(&dev->device_lock); - mei_nfc_free(ndev); - mutex_unlock(&dev->device_lock); - -} - - -int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl) -{ - struct mei_nfc_dev *ndev; - struct mei_cl *cl_info, *cl; - int ret; - - - /* in case of internal reset bail out - * as the device is already setup - */ - cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid); - if (cl) - return 0; - - ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL); - if (!ndev) { - ret = -ENOMEM; - goto err; - } - - ndev->me_cl = mei_me_cl_get(me_cl); - if (!ndev->me_cl) { - ret = -ENODEV; - goto err; - } - - cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY); - if 
(IS_ERR(cl_info)) { - ret = PTR_ERR(cl_info); - goto err; - } - - list_add_tail(&cl_info->device_link, &dev->device_list); - - ndev->cl_info = cl_info; - - cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY); - if (IS_ERR(cl)) { - ret = PTR_ERR(cl); - goto err; - } - - list_add_tail(&cl->device_link, &dev->device_list); - - ndev->cl = cl; - - INIT_WORK(&ndev->init_work, mei_nfc_init); - schedule_work(&ndev->init_work); - - return 0; - -err: - mei_nfc_free(ndev); - - return ret; -} - -void mei_nfc_host_exit(struct mei_device *dev) -{ - struct mei_nfc_dev *ndev; - struct mei_cl *cl; - struct mei_cl_device *cldev; - - cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid); - if (!cl) - return; - - cldev = cl->device; - if (!cldev) - return; - - ndev = (struct mei_nfc_dev *)cldev->priv_data; - if (ndev) - cancel_work_sync(&ndev->init_work); - - cldev->priv_data = NULL; - - /* Need to remove the device here - * since mei_nfc_free will unlink the clients - */ - mei_cl_remove_device(cldev); - - mutex_lock(&dev->device_lock); - mei_nfc_free(ndev); - mutex_unlock(&dev->device_lock); -} - - diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 23f71f5ce4fb..27678d8154e0 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -82,6 +82,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)}, {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, + /* required last entry */ {0, } }; @@ -128,6 +133,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); struct mei_device *dev; struct mei_me_hw *hw; + unsigned int irqflags; int err; @@ -180,17 +186,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_enable_msi(pdev); /* request and enable interrupt */ - if (pci_dev_msi_enabled(pdev)) - err = request_threaded_irq(pdev->irq, - NULL, - mei_me_irq_thread_handler, - IRQF_ONESHOT, KBUILD_MODNAME, dev); - else - err = request_threaded_irq(pdev->irq, + irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; + + err = request_threaded_irq(pdev->irq, mei_me_irq_quick_handler, mei_me_irq_thread_handler, - IRQF_SHARED, KBUILD_MODNAME, dev); - + irqflags, KBUILD_MODNAME, dev); if (err) { dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", pdev->irq); @@ -319,6 +320,7 @@ static int mei_me_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; + unsigned int irqflags; int err; dev = pci_get_drvdata(pdev); @@ -327,17 +329,13 @@ static int mei_me_pci_resume(struct device *device) pci_enable_msi(pdev); + irqflags = pci_dev_msi_enabled(pdev) ? 
IRQF_ONESHOT : IRQF_SHARED; + /* request and enable interrupt */ - if (pci_dev_msi_enabled(pdev)) - err = request_threaded_irq(pdev->irq, - NULL, - mei_me_irq_thread_handler, - IRQF_ONESHOT, KBUILD_MODNAME, dev); - else - err = request_threaded_irq(pdev->irq, + err = request_threaded_irq(pdev->irq, mei_me_irq_quick_handler, mei_me_irq_thread_handler, - IRQF_SHARED, KBUILD_MODNAME, dev); + irqflags, KBUILD_MODNAME, dev); if (err) { dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n", diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c new file mode 100644 index 000000000000..7b4a2da487a5 --- /dev/null +++ b/drivers/misc/qcom-coincell.c @@ -0,0 +1,152 @@ +/* Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, Sony Mobile Communications Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +struct qcom_coincell { + struct device *dev; + struct regmap *regmap; + u32 base_addr; +}; + +#define QCOM_COINCELL_REG_RSET 0x44 +#define QCOM_COINCELL_REG_VSET 0x45 +#define QCOM_COINCELL_REG_ENABLE 0x46 + +#define QCOM_COINCELL_ENABLE BIT(7) + +static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 }; +static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 }; +/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */ + +/* if enable==0, rset and vset are ignored */ +static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset, + int vset, bool enable) +{ + int i, j, rc; + + /* if disabling, just do that and skip other operations */ + if (!enable) + return regmap_write(chgr->regmap, + chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0); + + /* find index for current-limiting resistor */ + for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++) + if (rset == qcom_rset_map[i]) + break; + + if (i >= ARRAY_SIZE(qcom_rset_map)) { + dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset); + return -EINVAL; + } + + /* find index for charge voltage */ + for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++) + if (vset == qcom_vset_map[j]) + break; + + if (j >= ARRAY_SIZE(qcom_vset_map)) { + dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset); + return -EINVAL; + } + + rc = regmap_write(chgr->regmap, + chgr->base_addr + QCOM_COINCELL_REG_RSET, i); + if (rc) { + /* + * This is mainly to flag a bad base_addr (reg) from dts. + * Other failures writing to the registers should be + * extremely rare, or indicative of problems that + * should be reported elsewhere (eg. spmi failure). 
+ */ + dev_err(chgr->dev, "could not write to RSET register\n"); + return rc; + } + + rc = regmap_write(chgr->regmap, + chgr->base_addr + QCOM_COINCELL_REG_VSET, j); + if (rc) + return rc; + + /* set 'enable' register */ + return regmap_write(chgr->regmap, + chgr->base_addr + QCOM_COINCELL_REG_ENABLE, + QCOM_COINCELL_ENABLE); +} + +static int qcom_coincell_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct qcom_coincell chgr; + u32 rset, vset; + bool enable; + int rc; + + chgr.dev = &pdev->dev; + + chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chgr.regmap) { + dev_err(chgr.dev, "Unable to get regmap\n"); + return -EINVAL; + } + + rc = of_property_read_u32(node, "reg", &chgr.base_addr); + if (rc) + return rc; + + enable = !of_property_read_bool(node, "qcom,charger-disable"); + + if (enable) { + rc = of_property_read_u32(node, "qcom,rset-ohms", &rset); + if (rc) { + dev_err(chgr.dev, + "can't find 'qcom,rset-ohms' in DT block"); + return rc; + } + + rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset); + if (rc) { + dev_err(chgr.dev, + "can't find 'qcom,vset-millivolts' in DT block"); + return rc; + } + } + + return qcom_coincell_chgr_config(&chgr, rset, vset, enable); +} + +static const struct of_device_id qcom_coincell_match_table[] = { + { .compatible = "qcom,pm8941-coincell", }, + {} +}; + +MODULE_DEVICE_TABLE(of, qcom_coincell_match_table); + +static struct platform_driver qcom_coincell_driver = { + .driver = { + .name = "qcom-spmi-coincell", + .of_match_table = qcom_coincell_match_table, + }, + .probe = qcom_coincell_probe, +}; + +module_platform_driver(qcom_coincell_driver); + +MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 5027b8ffae43..71b64550b591 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -36,8 +36,6 @@ #include <linux/skbuff.h> #include <linux/ti_wilink_st.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> #define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ static struct platform_device *st_kim_devices[MAX_ST_DEVICES]; @@ -45,9 +43,6 @@ static struct platform_device *st_kim_devices[MAX_ST_DEVICES]; /**********************************************************************/ /* internal functions */ -struct ti_st_plat_data *dt_pdata; -static struct ti_st_plat_data *get_platform_data(struct device *dev); - /** * st_get_plat_device - * function which returns the reference to the platform device @@ -469,12 +464,7 @@ long st_kim_start(void *kim_data) struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; pr_info(" %s", __func__); - if (kim_gdata->kim_pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else { - pdata = kim_gdata->kim_pdev->dev.platform_data; - } + pdata = kim_gdata->kim_pdev->dev.platform_data; do { /* platform specific enabling code here */ @@ -482,9 +472,9 @@ long st_kim_start(void *kim_data) pdata->chip_enable(kim_gdata); /* Configure BT nShutdown to HIGH state */ - gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); mdelay(5); /* FIXME: a proper toggle */ - gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); mdelay(100); /* re-initialize the completion */ reinit_completion(&kim_gdata->ldisc_installed); @@ -534,18 +524,12 @@ long st_kim_stop(void *kim_data) { long 
err = 0; struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; - struct ti_st_plat_data *pdata; + struct ti_st_plat_data *pdata = + kim_gdata->kim_pdev->dev.platform_data; struct tty_struct *tty = kim_gdata->core_data->tty; reinit_completion(&kim_gdata->ldisc_installed); - if (kim_gdata->kim_pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else - pdata = kim_gdata->kim_pdev->dev.platform_data; - - if (tty) { /* can be called before ldisc is installed */ /* Flush any pending characters in the driver and discipline. */ tty_ldisc_flush(tty); @@ -566,11 +550,11 @@ long st_kim_stop(void *kim_data) } /* By default configure BT nShutdown to LOW state */ - gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); mdelay(1); - gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH); mdelay(1); - gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); + gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW); /* platform specific disable */ if (pdata->chip_disable) @@ -737,52 +721,13 @@ static const struct file_operations list_debugfs_fops = { * board-*.c file */ -static const struct of_device_id kim_of_match[] = { -{ - .compatible = "kim", - }, - {} -}; -MODULE_DEVICE_TABLE(of, kim_of_match); - -static struct ti_st_plat_data *get_platform_data(struct device *dev) -{ - struct device_node *np = dev->of_node; - const u32 *dt_property; - int len; - - dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL); - if (!dt_pdata) - return NULL; - - dt_property = of_get_property(np, "dev_name", &len); - if (dt_property) - memcpy(&dt_pdata->dev_name, dt_property, len); - of_property_read_u32(np, "nshutdown_gpio", - &dt_pdata->nshutdown_gpio); - of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl); - of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate); - - return dt_pdata; -} - static struct dentry *kim_debugfs_dir; static int kim_probe(struct platform_device *pdev) { struct kim_data_s *kim_gdata; - struct ti_st_plat_data *pdata; + struct ti_st_plat_data *pdata = pdev->dev.platform_data; int err; - if (pdev->dev.of_node) - pdata = get_platform_data(&pdev->dev); - else - pdata = pdev->dev.platform_data; - - if (pdata == NULL) { - dev_err(&pdev->dev, "Platform Data is missing\n"); - return -ENXIO; - } - if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) { /* multiple devices could exist */ st_kim_devices[pdev->id] = pdev; @@ -863,16 +808,9 @@ err_core_init: static int kim_remove(struct platform_device *pdev) { /* free the GPIOs requested */ - struct ti_st_plat_data *pdata; + struct ti_st_plat_data *pdata = pdev->dev.platform_data; struct kim_data_s *kim_gdata; - if (pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else { - pdata = pdev->dev.platform_data; - } - kim_gdata = platform_get_drvdata(pdev); /* Free the Bluetooth/FM/GPIO @@ -890,22 +828,12 @@ static int kim_remove(struct platform_device *pdev) kfree(kim_gdata); kim_gdata = NULL; - kfree(dt_pdata); - dt_pdata = NULL; - return 0; } static int kim_suspend(struct platform_device *pdev, pm_message_t state) { - struct ti_st_plat_data *pdata; - - if (pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else { - pdata = pdev->dev.platform_data; - } + struct ti_st_plat_data *pdata = pdev->dev.platform_data; if (pdata->suspend) return pdata->suspend(pdev, state); @@ -915,14 +843,7 @@ static int kim_suspend(struct platform_device *pdev, pm_message_t state) static 
int kim_resume(struct platform_device *pdev) { - struct ti_st_plat_data *pdata; - - if (pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else { - pdata = pdev->dev.platform_data; - } + struct ti_st_plat_data *pdata = pdev->dev.platform_data; if (pdata->resume) return pdata->resume(pdev); @@ -939,8 +860,6 @@ static struct platform_driver kim_platform_driver = { .resume = kim_resume, .driver = { .name = "kim", - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(kim_of_match), }, }; diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c index 518e1b7f2f95..93b4d67cc4a3 100644 --- a/drivers/misc/ti-st/st_ll.c +++ b/drivers/misc/ti-st/st_ll.c @@ -26,7 +26,6 @@ #include <linux/ti_wilink_st.h> /**********************************************************************/ - /* internal functions */ static void send_ll_cmd(struct st_data_s *st_data, unsigned char cmd) @@ -54,13 +53,7 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data) /* communicate to platform about chip asleep */ kim_data = st_data->kim_data; - if (kim_data->kim_pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else { - pdata = kim_data->kim_pdev->dev.platform_data; - } - + pdata = kim_data->kim_pdev->dev.platform_data; if (pdata->chip_asleep) pdata->chip_asleep(NULL); } @@ -93,13 +86,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data) /* communicate to platform about chip wakeup */ kim_data = st_data->kim_data; - if (kim_data->kim_pdev->dev.of_node) { - pr_debug("use device tree data"); - pdata = dt_pdata; - } else { - pdata = kim_data->kim_pdev->dev.platform_data; - } - + pdata = kim_data->kim_pdev->dev.platform_data; if (pdata->chip_awake) pdata->chip_awake(NULL); } diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index b00335652e52..87a13374fdc0 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -446,7 +446,6 @@ MODULE_DEVICE_TABLE(i2c, tsl2550_id); static struct i2c_driver tsl2550_driver = { .driver = { .name = TSL2550_DRV_NAME, - .owner = THIS_MODULE, .pm = TSL2550_PM_OPS, }, .probe = tsl2550_probe, diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 191617492181..ffb56340d0c7 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -46,7 +46,7 @@ MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); -MODULE_VERSION("1.2.1.3-k"); +MODULE_VERSION("1.3.0.0-k"); MODULE_ALIAS("dmi:*:svnVMware*:*"); MODULE_ALIAS("vmware_vmmemctl"); MODULE_LICENSE("GPL"); @@ -110,9 +110,18 @@ MODULE_LICENSE("GPL"); */ #define VMW_BALLOON_HV_PORT 0x5670 #define VMW_BALLOON_HV_MAGIC 0x456c6d6f -#define VMW_BALLOON_PROTOCOL_VERSION 2 #define VMW_BALLOON_GUEST_ID 1 /* Linux */ +enum vmwballoon_capabilities { + /* + * Bit 0 is reserved and not associated to any capability. 
+ */ + VMW_BALLOON_BASIC_CMDS = (1 << 1), + VMW_BALLOON_BATCHED_CMDS = (1 << 2) +}; + +#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS) + #define VMW_BALLOON_CMD_START 0 #define VMW_BALLOON_CMD_GET_TARGET 1 #define VMW_BALLOON_CMD_LOCK 2 @@ -120,32 +129,36 @@ MODULE_LICENSE("GPL"); #define VMW_BALLOON_CMD_GUEST_ID 4 /* error codes */ -#define VMW_BALLOON_SUCCESS 0 -#define VMW_BALLOON_FAILURE -1 -#define VMW_BALLOON_ERROR_CMD_INVALID 1 -#define VMW_BALLOON_ERROR_PPN_INVALID 2 -#define VMW_BALLOON_ERROR_PPN_LOCKED 3 -#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 -#define VMW_BALLOON_ERROR_PPN_PINNED 5 -#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 -#define VMW_BALLOON_ERROR_RESET 7 -#define VMW_BALLOON_ERROR_BUSY 8 - -#define VMWARE_BALLOON_CMD(cmd, data, result) \ -({ \ - unsigned long __stat, __dummy1, __dummy2; \ - __asm__ __volatile__ ("inl %%dx" : \ - "=a"(__stat), \ - "=c"(__dummy1), \ - "=d"(__dummy2), \ - "=b"(result) : \ - "0"(VMW_BALLOON_HV_MAGIC), \ - "1"(VMW_BALLOON_CMD_##cmd), \ - "2"(VMW_BALLOON_HV_PORT), \ - "3"(data) : \ - "memory"); \ - result &= -1UL; \ - __stat & -1UL; \ +#define VMW_BALLOON_SUCCESS 0 +#define VMW_BALLOON_FAILURE -1 +#define VMW_BALLOON_ERROR_CMD_INVALID 1 +#define VMW_BALLOON_ERROR_PPN_INVALID 2 +#define VMW_BALLOON_ERROR_PPN_LOCKED 3 +#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4 +#define VMW_BALLOON_ERROR_PPN_PINNED 5 +#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6 +#define VMW_BALLOON_ERROR_RESET 7 +#define VMW_BALLOON_ERROR_BUSY 8 + +#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000) + +#define VMWARE_BALLOON_CMD(cmd, data, result) \ +({ \ + unsigned long __status, __dummy1, __dummy2; \ + __asm__ __volatile__ ("inl %%dx" : \ + "=a"(__status), \ + "=c"(__dummy1), \ + "=d"(__dummy2), \ + "=b"(result) : \ + "0"(VMW_BALLOON_HV_MAGIC), \ + "1"(VMW_BALLOON_CMD_##cmd), \ + "2"(VMW_BALLOON_HV_PORT), \ + "3"(data) : \ + "memory"); \ + if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \ + result = __dummy1; \ + result &= -1UL; \ + __status & -1UL; \ }) #ifdef CONFIG_DEBUG_FS @@ -223,11 +236,12 @@ static struct vmballoon balloon; */ static bool vmballoon_send_start(struct vmballoon *b) { - unsigned long status, dummy; + unsigned long status, capabilities; STATS_INC(b->stats.start); - status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy); + status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES, + capabilities); if (status == VMW_BALLOON_SUCCESS) return true; @@ -402,55 +416,37 @@ static void vmballoon_reset(struct vmballoon *b) } /* - * Allocate (or reserve) a page for the balloon and notify the host. If host - * refuses the page put it on "refuse" list and allocate another one until host - * is satisfied. "Refused" pages are released at the end of inflation cycle - * (when we allocate b->rate_alloc pages). + * Notify the host of a ballooned page. If host rejects the page put it on the + * refuse list, those refused page are then released at the end of the + * inflation cycle. */ -static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) +static int vmballoon_lock_page(struct vmballoon *b, struct page *page) { - struct page *page; - gfp_t flags; - unsigned int hv_status; - int locked; - flags = can_sleep ? 
VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP; - - do { - if (!can_sleep) - STATS_INC(b->stats.alloc); - else - STATS_INC(b->stats.sleep_alloc); + int locked, hv_status; - page = alloc_page(flags); - if (!page) { - if (!can_sleep) - STATS_INC(b->stats.alloc_fail); - else - STATS_INC(b->stats.sleep_alloc_fail); - return -ENOMEM; - } + locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); + if (locked > 0) { + STATS_INC(b->stats.refused_alloc); - /* inform monitor */ - locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); - if (locked > 0) { - STATS_INC(b->stats.refused_alloc); - - if (hv_status == VMW_BALLOON_ERROR_RESET || - hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { - __free_page(page); - return -EIO; - } + if (hv_status == VMW_BALLOON_ERROR_RESET || + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { + __free_page(page); + return -EIO; + } - /* - * Place page on the list of non-balloonable pages - * and retry allocation, unless we already accumulated - * too many of them, in which case take a breather. - */ + /* + * Place page on the list of non-balloonable pages + * and retry allocation, unless we already accumulated + * too many of them, in which case take a breather. + */ + if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) { + b->n_refused_pages++; list_add(&page->lru, &b->refused_pages); - if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED) - return -EIO; + } else { + __free_page(page); } - } while (locked != 0); + return -EIO; + } /* track allocated page */ list_add(&page->lru, &b->pages); @@ -512,7 +508,7 @@ static void vmballoon_inflate(struct vmballoon *b) unsigned int i; unsigned int allocations = 0; int error = 0; - bool alloc_can_sleep = false; + gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP; pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); @@ -543,19 +539,16 @@ static void vmballoon_inflate(struct vmballoon *b) __func__, goal, rate, b->rate_alloc); for (i = 0; i < goal; i++) { + struct page *page; - error = vmballoon_reserve_page(b, alloc_can_sleep); - if (error) { - if (error != -ENOMEM) { - /* - * Not a page allocation failure, stop this - * cycle. Maybe we'll get new target from - * the host soon. - */ - break; - } + if (flags == VMW_PAGE_ALLOC_NOSLEEP) + STATS_INC(b->stats.alloc); + else + STATS_INC(b->stats.sleep_alloc); - if (alloc_can_sleep) { + page = alloc_page(flags); + if (!page) { + if (flags == VMW_PAGE_ALLOC_CANSLEEP) { /* * CANSLEEP page allocation failed, so guest * is under severe memory pressure. Quickly @@ -563,8 +556,10 @@ static void vmballoon_inflate(struct vmballoon *b) */ b->rate_alloc = max(b->rate_alloc / 2, VMW_BALLOON_RATE_ALLOC_MIN); + STATS_INC(b->stats.sleep_alloc_fail); break; } + STATS_INC(b->stats.alloc_fail); /* * NOSLEEP page allocation failed, so the guest is @@ -579,11 +574,16 @@ static void vmballoon_inflate(struct vmballoon *b) if (i >= b->rate_alloc) break; - alloc_can_sleep = true; + flags = VMW_PAGE_ALLOC_CANSLEEP; /* Lower rate for sleeping allocations. 
*/ rate = b->rate_alloc; + continue; } + error = vmballoon_lock_page(b, page); + if (error) + break; + if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) { cond_resched(); allocations = 0; diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index a721b5d8a9da..9ec262a52656 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -1031,14 +1031,9 @@ int __init vmci_host_init(void) void __exit vmci_host_exit(void) { - int error; - vmci_host_device_initialized = false; - error = misc_deregister(&vmci_host_miscdev); - if (error) - pr_warn("Error unregistering character device: %d\n", error); - + misc_deregister(&vmci_host_miscdev); vmci_ctx_destroy(host_context); vmci_qp_broker_exit(); diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c index 2b77ccf77f81..754a9bb0f58d 100644 --- a/drivers/nfc/mei_phy.c +++ b/drivers/nfc/mei_phy.c @@ -355,7 +355,8 @@ static int nfc_mei_phy_enable(void *phy_id) goto err; } - r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); + r = mei_cl_register_event_cb(phy->device, BIT(MEI_CL_EVENT_RX), + nfc_mei_event_cb, phy); if (r) { pr_err("Event cb registration failed %d\n", r); goto err; diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig new file mode 100644 index 000000000000..8db297821f78 --- /dev/null +++ b/drivers/nvmem/Kconfig @@ -0,0 +1,39 @@ +menuconfig NVMEM + tristate "NVMEM Support" + select REGMAP + help + Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... + + This framework is designed to provide a generic interface to NVMEM + from both the Linux Kernel and the userspace. + + This driver can also be built as a module. If so, the module + will be called nvmem_core. + + If unsure, say no. + +if NVMEM + +config QCOM_QFPROM + tristate "QCOM QFPROM Support" + depends on ARCH_QCOM || COMPILE_TEST + select REGMAP_MMIO + help + Say y here to enable QFPROM support. The QFPROM provides access + functions for QFPROM data to rest of the drivers via nvmem interface. + + This driver can also be built as a module. If so, the module + will be called nvmem_qfprom. + +config NVMEM_SUNXI_SID + tristate "Allwinner SoCs SID support" + depends on ARCH_SUNXI + select REGMAP_MMIO + help + This is a driver for the 'security ID' available on various Allwinner + devices. + + This driver can also be built as a module. If so, the module + will be called nvmem_sunxi_sid. + +endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile new file mode 100644 index 000000000000..4328b930ad9a --- /dev/null +++ b/drivers/nvmem/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for nvmem drivers. +# + +obj-$(CONFIG_NVMEM) += nvmem_core.o +nvmem_core-y := core.o + +# Devices +obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o +nvmem_qfprom-y := qfprom.o +obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o +nvmem_sunxi_sid-y := sunxi_sid.o diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c new file mode 100644 index 000000000000..d3c6676b3c0c --- /dev/null +++ b/drivers/nvmem/core.c @@ -0,0 +1,1083 @@ +/* + * nvmem framework core. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +struct nvmem_device { + const char *name; + struct regmap *regmap; + struct module *owner; + struct device dev; + int stride; + int word_size; + int ncells; + int id; + int users; + size_t size; + bool read_only; +}; + +struct nvmem_cell { + const char *name; + int offset; + int bytes; + int bit_offset; + int nbits; + struct nvmem_device *nvmem; + struct list_head node; +}; + +static DEFINE_MUTEX(nvmem_mutex); +static DEFINE_IDA(nvmem_ida); + +static LIST_HEAD(nvmem_cells); +static DEFINE_MUTEX(nvmem_cells_mutex); + +#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) + +static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvmem_device *nvmem = to_nvmem_device(dev); + int rc; + + /* Stop the user from reading */ + if (pos > nvmem->size) + return 0; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = regmap_raw_read(nvmem->regmap, pos, buf, count); + + if (IS_ERR_VALUE(rc)) + return rc; + + return count; +} + +static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t pos, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct nvmem_device *nvmem = to_nvmem_device(dev); + int rc; + + /* Stop the user from writing */ + if (pos > nvmem->size) + return 0; + + if (pos + count > nvmem->size) + count = nvmem->size - pos; + + count = round_down(count, nvmem->word_size); + + rc = regmap_raw_write(nvmem->regmap, pos, buf, count); + + if (IS_ERR_VALUE(rc)) + return rc; + + return count; +} + +/* default read/write permissions */ +static struct bin_attribute bin_attr_rw_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IWUSR | S_IRUGO, + }, + .read = bin_attr_nvmem_read, + .write = bin_attr_nvmem_write, +}; + +static struct bin_attribute *nvmem_bin_rw_attributes[] = { + &bin_attr_rw_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_rw_group = { + .bin_attrs = nvmem_bin_rw_attributes, +}; + +static const struct attribute_group *nvmem_rw_dev_groups[] = { + &nvmem_bin_rw_group, + NULL, +}; + +/* read only permission */ +static struct bin_attribute bin_attr_ro_nvmem = { + .attr = { + .name = "nvmem", + .mode = S_IRUGO, + }, + .read = bin_attr_nvmem_read, +}; + +static struct bin_attribute *nvmem_bin_ro_attributes[] = { + &bin_attr_ro_nvmem, + NULL, +}; + +static const struct attribute_group nvmem_bin_ro_group = { + .bin_attrs = nvmem_bin_ro_attributes, +}; + +static const struct attribute_group *nvmem_ro_dev_groups[] = { + &nvmem_bin_ro_group, + NULL, +}; + +static void nvmem_release(struct device *dev) +{ + struct nvmem_device *nvmem = to_nvmem_device(dev); + + ida_simple_remove(&nvmem_ida, nvmem->id); + kfree(nvmem); +} + +static const struct device_type nvmem_provider_type = { + 
.release = nvmem_release, +}; + +static struct bus_type nvmem_bus_type = { + .name = "nvmem", +}; + +static int of_nvmem_match(struct device *dev, void *nvmem_np) +{ + return dev->of_node == nvmem_np; +} + +static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) +{ + struct device *d; + + if (!nvmem_np) + return NULL; + + d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match); + + if (!d) + return NULL; + + return to_nvmem_device(d); +} + +static struct nvmem_cell *nvmem_find_cell(const char *cell_id) +{ + struct nvmem_cell *p; + + list_for_each_entry(p, &nvmem_cells, node) + if (p && !strcmp(p->name, cell_id)) + return p; + + return NULL; +} + +static void nvmem_cell_drop(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_cells_mutex); + list_del(&cell->node); + mutex_unlock(&nvmem_cells_mutex); + kfree(cell); +} + +static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) +{ + struct nvmem_cell *cell; + struct list_head *p, *n; + + list_for_each_safe(p, n, &nvmem_cells) { + cell = list_entry(p, struct nvmem_cell, node); + if (cell->nvmem == nvmem) + nvmem_cell_drop(cell); + } +} + +static void nvmem_cell_add(struct nvmem_cell *cell) +{ + mutex_lock(&nvmem_cells_mutex); + list_add_tail(&cell->node, &nvmem_cells); + mutex_unlock(&nvmem_cells_mutex); +} + +static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + struct nvmem_cell *cell) +{ + cell->nvmem = nvmem; + cell->offset = info->offset; + cell->bytes = info->bytes; + cell->name = info->name; + + cell->bit_offset = info->bit_offset; + cell->nbits = info->nbits; + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + return -EINVAL; + } + + return 0; +} + +static int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_config *cfg) +{ + struct nvmem_cell **cells; + const struct nvmem_cell_info *info = cfg->cells; + int i, rval; + + cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL); + if (!cells) + return -ENOMEM; + + for (i = 0; i < cfg->ncells; i++) { + cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); + if (!cells[i]) { + rval = -ENOMEM; + goto err; + } + + rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); + if (IS_ERR_VALUE(rval)) { + kfree(cells[i]); + goto err; + } + + nvmem_cell_add(cells[i]); + } + + nvmem->ncells = cfg->ncells; + /* remove tmp array */ + kfree(cells); + + return 0; +err: + while (--i) + nvmem_cell_drop(cells[i]); + + return rval; +} + +/** + * nvmem_register() - Register a nvmem device for given nvmem_config. + * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem + * + * @config: nvmem device configuration with which nvmem device is created. + * + * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device + * on success. 
+ */ + +struct nvmem_device *nvmem_register(const struct nvmem_config *config) +{ + struct nvmem_device *nvmem; + struct device_node *np; + struct regmap *rm; + int rval; + + if (!config->dev) + return ERR_PTR(-EINVAL); + + rm = dev_get_regmap(config->dev, NULL); + if (!rm) { + dev_err(config->dev, "Regmap not found\n"); + return ERR_PTR(-EINVAL); + } + + nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); + if (!nvmem) + return ERR_PTR(-ENOMEM); + + rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL); + if (rval < 0) { + kfree(nvmem); + return ERR_PTR(rval); + } + + nvmem->id = rval; + nvmem->regmap = rm; + nvmem->owner = config->owner; + nvmem->stride = regmap_get_reg_stride(rm); + nvmem->word_size = regmap_get_val_bytes(rm); + nvmem->size = regmap_get_max_register(rm) + nvmem->stride; + nvmem->dev.type = &nvmem_provider_type; + nvmem->dev.bus = &nvmem_bus_type; + nvmem->dev.parent = config->dev; + np = config->dev->of_node; + nvmem->dev.of_node = np; + dev_set_name(&nvmem->dev, "%s%d", + config->name ? : "nvmem", config->id); + + nvmem->read_only = of_property_read_bool(np, "read-only") | + config->read_only; + + nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups : + nvmem_rw_dev_groups; + + device_initialize(&nvmem->dev); + + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + + rval = device_add(&nvmem->dev); + if (rval) { + ida_simple_remove(&nvmem_ida, nvmem->id); + kfree(nvmem); + return ERR_PTR(rval); + } + + if (config->cells) + nvmem_add_cells(nvmem, config); + + return nvmem; +} +EXPORT_SYMBOL_GPL(nvmem_register); + +/** + * nvmem_unregister() - Unregister previously registered nvmem device + * + * @nvmem: Pointer to previously registered nvmem device. + * + * Return: Will be an negative on error or a zero on success. 
+ */ +int nvmem_unregister(struct nvmem_device *nvmem) +{ + mutex_lock(&nvmem_mutex); + if (nvmem->users) { + mutex_unlock(&nvmem_mutex); + return -EBUSY; + } + mutex_unlock(&nvmem_mutex); + + nvmem_device_remove_all_cells(nvmem); + device_del(&nvmem->dev); + + return 0; +} +EXPORT_SYMBOL_GPL(nvmem_unregister); + +static struct nvmem_device *__nvmem_device_get(struct device_node *np, + struct nvmem_cell **cellp, + const char *cell_id) +{ + struct nvmem_device *nvmem = NULL; + + mutex_lock(&nvmem_mutex); + + if (np) { + nvmem = of_nvmem_find(np); + if (!nvmem) { + mutex_unlock(&nvmem_mutex); + return ERR_PTR(-EPROBE_DEFER); + } + } else { + struct nvmem_cell *cell = nvmem_find_cell(cell_id); + + if (cell) { + nvmem = cell->nvmem; + *cellp = cell; + } + + if (!nvmem) { + mutex_unlock(&nvmem_mutex); + return ERR_PTR(-ENOENT); + } + } + + nvmem->users++; + mutex_unlock(&nvmem_mutex); + + if (!try_module_get(nvmem->owner)) { + dev_err(&nvmem->dev, + "could not increase module refcount for cell %s\n", + nvmem->name); + + mutex_lock(&nvmem_mutex); + nvmem->users--; + mutex_unlock(&nvmem_mutex); + + return ERR_PTR(-EINVAL); + } + + return nvmem; +} + +static void __nvmem_device_put(struct nvmem_device *nvmem) +{ + module_put(nvmem->owner); + mutex_lock(&nvmem_mutex); + nvmem->users--; + mutex_unlock(&nvmem_mutex); +} + +static int nvmem_match(struct device *dev, void *data) +{ + return !strcmp(dev_name(dev), data); +} + +static struct nvmem_device *nvmem_find(const char *name) +{ + struct device *d; + + d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match); + + if (!d) + return NULL; + + return to_nvmem_device(d); +} + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_device_get() - Get nvmem device from a given id + * + * @dev node: Device tree node that uses the nvmem device + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) +{ + + struct device_node *nvmem_np; + int index; + + index = of_property_match_string(np, "nvmem-names", id); + + nvmem_np = of_parse_phandle(np, "nvmem", index); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + return __nvmem_device_get(nvmem_np, NULL, NULL); +} +EXPORT_SYMBOL_GPL(of_nvmem_device_get); +#endif + +/** + * nvmem_device_get() - Get nvmem device from a given id + * + * @dev : Device that uses the nvmem device + * @id: nvmem name from nvmem-names property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device + * on success. + */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) +{ + if (dev->of_node) { /* try dt first */ + struct nvmem_device *nvmem; + + nvmem = of_nvmem_device_get(dev->of_node, dev_name); + + if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) + return nvmem; + + } + + return nvmem_find(dev_name); +} +EXPORT_SYMBOL_GPL(nvmem_device_get); + +static int devm_nvmem_device_match(struct device *dev, void *res, void *data) +{ + struct nvmem_device **nvmem = res; + + if (WARN_ON(!nvmem || !*nvmem)) + return 0; + + return *nvmem == data; +} + +static void devm_nvmem_device_release(struct device *dev, void *res) +{ + nvmem_device_put(*(struct nvmem_device **)res); +} + +/** + * devm_nvmem_device_put() - put alredy got nvmem device + * + * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), + * that needs to be released. 
+ */ +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_device_release, + devm_nvmem_device_match, nvmem); + + WARN_ON(ret); +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_put); + +/** + * nvmem_device_put() - put alredy got nvmem device + * + * @nvmem: pointer to nvmem device that needs to be released. + */ +void nvmem_device_put(struct nvmem_device *nvmem) +{ + __nvmem_device_put(nvmem); +} +EXPORT_SYMBOL_GPL(nvmem_device_put); + +/** + * devm_nvmem_device_get() - Get nvmem cell of device form a given id + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem name in nvmems property. + * + * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell + * on success. The nvmem_cell will be freed by the automatically once the + * device is freed. + */ +struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) +{ + struct nvmem_device **ptr, *nvmem; + + ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + nvmem = nvmem_device_get(dev, id); + if (!IS_ERR(nvmem)) { + *ptr = nvmem; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return nvmem; +} +EXPORT_SYMBOL_GPL(devm_nvmem_device_get); + +static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id) +{ + struct nvmem_cell *cell = NULL; + struct nvmem_device *nvmem; + + nvmem = __nvmem_device_get(NULL, &cell, cell_id); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + return cell; +} + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) +/** + * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem cell name from nvmem-cell-names property. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). 
+ */ +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name) +{ + struct device_node *cell_np, *nvmem_np; + struct nvmem_cell *cell; + struct nvmem_device *nvmem; + const __be32 *addr; + int rval, len, index; + + index = of_property_match_string(np, "nvmem-cell-names", name); + + cell_np = of_parse_phandle(np, "nvmem-cells", index); + if (!cell_np) + return ERR_PTR(-EINVAL); + + nvmem_np = of_get_next_parent(cell_np); + if (!nvmem_np) + return ERR_PTR(-EINVAL); + + nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); + if (IS_ERR(nvmem)) + return ERR_CAST(nvmem); + + addr = of_get_property(cell_np, "reg", &len); + if (!addr || (len < 2 * sizeof(u32))) { + dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n", + cell_np->full_name); + rval = -EINVAL; + goto err_mem; + } + + cell = kzalloc(sizeof(*cell), GFP_KERNEL); + if (!cell) { + rval = -ENOMEM; + goto err_mem; + } + + cell->nvmem = nvmem; + cell->offset = be32_to_cpup(addr++); + cell->bytes = be32_to_cpup(addr); + cell->name = cell_np->name; + + addr = of_get_property(cell_np, "bits", &len); + if (addr && len == (2 * sizeof(u32))) { + cell->bit_offset = be32_to_cpup(addr++); + cell->nbits = be32_to_cpup(addr); + } + + if (cell->nbits) + cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, + BITS_PER_BYTE); + + if (!IS_ALIGNED(cell->offset, nvmem->stride)) { + dev_err(&nvmem->dev, + "cell %s unaligned to nvmem stride %d\n", + cell->name, nvmem->stride); + rval = -EINVAL; + goto err_sanity; + } + + nvmem_cell_add(cell); + + return cell; + +err_sanity: + kfree(cell); + +err_mem: + __nvmem_device_put(nvmem); + + return ERR_PTR(rval); +} +EXPORT_SYMBOL_GPL(of_nvmem_cell_get); +#endif + +/** + * nvmem_cell_get() - Get nvmem cell of device form a given cell name + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem cell name to get. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * nvmem_cell_put(). + */ +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) +{ + struct nvmem_cell *cell; + + if (dev->of_node) { /* try dt first */ + cell = of_nvmem_cell_get(dev->of_node, cell_id); + if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) + return cell; + } + + return nvmem_cell_get_from_list(cell_id); +} +EXPORT_SYMBOL_GPL(nvmem_cell_get); + +static void devm_nvmem_cell_release(struct device *dev, void *res) +{ + nvmem_cell_put(*(struct nvmem_cell **)res); +} + +/** + * devm_nvmem_cell_get() - Get nvmem cell of device form a given id + * + * @dev node: Device tree node that uses the nvmem cell + * @id: nvmem id in nvmem-names property. + * + * Return: Will be an ERR_PTR() on error or a valid pointer + * to a struct nvmem_cell. The nvmem_cell will be freed by the + * automatically once the device is freed. 
+ */ +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id) +{ + struct nvmem_cell **ptr, *cell; + + ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + cell = nvmem_cell_get(dev, id); + if (!IS_ERR(cell)) { + *ptr = cell; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return cell; +} +EXPORT_SYMBOL_GPL(devm_nvmem_cell_get); + +static int devm_nvmem_cell_match(struct device *dev, void *res, void *data) +{ + struct nvmem_cell **c = res; + + if (WARN_ON(!c || !*c)) + return 0; + + return *c == data; +} + +/** + * devm_nvmem_cell_put() - Release previously allocated nvmem cell + * from devm_nvmem_cell_get. + * + * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get() + */ +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) +{ + int ret; + + ret = devres_release(dev, devm_nvmem_cell_release, + devm_nvmem_cell_match, cell); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_nvmem_cell_put); + +/** + * nvmem_cell_put() - Release previously allocated nvmem cell. + * + * @cell: Previously allocated nvmem cell by nvmem_cell_get() + */ +void nvmem_cell_put(struct nvmem_cell *cell) +{ + struct nvmem_device *nvmem = cell->nvmem; + + __nvmem_device_put(nvmem); + nvmem_cell_drop(cell); +} +EXPORT_SYMBOL_GPL(nvmem_cell_put); + +static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, + void *buf) +{ + u8 *p, *b; + int i, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { + /* First shift */ + *b++ >>= bit_offset; + + /* setup rest of the bytes if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get bits from next byte and shift them towards msb */ + *p |= *b << (BITS_PER_BYTE - bit_offset); + + p = b; + *b++ >>= bit_offset; + } + + /* result fits in less bytes */ + if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) + *p-- = 0; + } + /* clear msb bits if any leftover in the last byte */ + *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); +} + +static int __nvmem_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell *cell, + void *buf, size_t *len) +{ + int rc; + + rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes); + + if (IS_ERR_VALUE(rc)) + return rc; + + /* shift bits in-place */ + if (cell->bit_offset || cell->bit_offset) + nvmem_shift_read_buffer_in_place(cell, buf); + + *len = cell->bytes; + + return 0; +} + +/** + * nvmem_cell_read() - Read a given nvmem cell + * + * @cell: nvmem cell to be read. + * @len: pointer to length of cell which will be populated on successful read. + * + * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success. + * The buffer should be freed by the consumer with a kfree(). 
+ */ +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + struct nvmem_device *nvmem = cell->nvmem; + u8 *buf; + int rc; + + if (!nvmem || !nvmem->regmap) + return ERR_PTR(-EINVAL); + + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + rc = __nvmem_cell_read(nvmem, cell, buf, len); + if (IS_ERR_VALUE(rc)) { + kfree(buf); + return ERR_PTR(rc); + } + + return buf; +} +EXPORT_SYMBOL_GPL(nvmem_cell_read); + +static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, + u8 *_buf, int len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int i, rc, nbits, bit_offset = cell->bit_offset; + u8 v, *p, *buf, *b, pbyte, pbits; + + nbits = cell->nbits; + buf = kzalloc(cell->bytes, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + memcpy(buf, _buf, len); + p = b = buf; + + if (bit_offset) { + pbyte = *b; + *b <<= bit_offset; + + /* setup the first byte with lsb bits from nvmem */ + rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1); + *b++ |= GENMASK(bit_offset - 1, 0) & v; + + /* setup rest of the byte if any */ + for (i = 1; i < cell->bytes; i++) { + /* Get last byte bits and shift them towards lsb */ + pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset); + pbyte = *b; + p = b; + *b <<= bit_offset; + *b++ |= pbits; + } + } + + /* if it's not end on byte boundary */ + if ((nbits + bit_offset) % BITS_PER_BYTE) { + /* setup the last byte with msb bits from nvmem */ + rc = regmap_raw_read(nvmem->regmap, + cell->offset + cell->bytes - 1, &v, 1); + *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; + + } + + return buf; +} + +/** + * nvmem_cell_write() - Write to a given nvmem cell + * + * @cell: nvmem cell to be written. + * @buf: Buffer to be written. + * @len: length of buffer to be written to nvmem cell. + * + * Return: length of bytes written or negative on failure. + */ +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) +{ + struct nvmem_device *nvmem = cell->nvmem; + int rc; + + if (!nvmem || !nvmem->regmap || nvmem->read_only || + (cell->bit_offset == 0 && len != cell->bytes)) + return -EINVAL; + + if (cell->bit_offset || cell->nbits) { + buf = nvmem_cell_prepare_write_buffer(cell, buf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + } + + rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); + + /* free the tmp buffer */ + if (cell->bit_offset) + kfree(buf); + + if (IS_ERR_VALUE(rc)) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_cell_write); + +/** + * nvmem_device_cell_read() - Read a given nvmem device and cell + * + * @nvmem: nvmem device to read from. + * @info: nvmem cell info to be read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + ssize_t len; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + if (IS_ERR_VALUE(rc)) + return rc; + + rc = __nvmem_cell_read(nvmem, &cell, buf, &len); + if (IS_ERR_VALUE(rc)) + return rc; + + return len; +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_read); + +/** + * nvmem_device_cell_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @info: nvmem cell info to be written + * @buf: buffer to be written to cell. 
+ * + * Return: length of bytes written or negative error code on failure. + * */ +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf) +{ + struct nvmem_cell cell; + int rc; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); + if (IS_ERR_VALUE(rc)) + return rc; + + return nvmem_cell_write(&cell, buf, cell.bytes); +} +EXPORT_SYMBOL_GPL(nvmem_device_cell_write); + +/** + * nvmem_device_read() - Read from a given nvmem device + * + * @nvmem: nvmem device to read from. + * @offset: offset in nvmem device. + * @bytes: number of bytes to read. + * @buf: buffer pointer which will be populated on successful read. + * + * Return: length of successful bytes read on success and negative + * error code on error. + */ +int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes); + + if (IS_ERR_VALUE(rc)) + return rc; + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_read); + +/** + * nvmem_device_write() - Write cell to a given nvmem device + * + * @nvmem: nvmem device to be written to. + * @offset: offset in nvmem device. + * @bytes: number of bytes to write. + * @buf: buffer to be written. + * + * Return: length of bytes written or negative error code on failure. + * */ +int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, + size_t bytes, void *buf) +{ + int rc; + + if (!nvmem || !nvmem->regmap) + return -EINVAL; + + rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes); + + if (IS_ERR_VALUE(rc)) + return rc; + + + return bytes; +} +EXPORT_SYMBOL_GPL(nvmem_device_write); + +static int __init nvmem_init(void) +{ + return bus_register(&nvmem_bus_type); +} + +static void __exit nvmem_exit(void) +{ + bus_unregister(&nvmem_bus_type); +} + +subsys_initcall(nvmem_init); +module_exit(nvmem_exit); + +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); +MODULE_DESCRIPTION("nvmem Driver Core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c new file mode 100644 index 000000000000..afb67e7eeee4 --- /dev/null +++ b/drivers/nvmem/qfprom.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +static struct regmap_config qfprom_regmap_config = { + .reg_bits = 32, + .val_bits = 8, + .reg_stride = 1, +}; + +static struct nvmem_config econfig = { + .name = "qfprom", + .owner = THIS_MODULE, +}; + +static int qfprom_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static int qfprom_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct regmap *regmap; + void __iomem *base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + qfprom_regmap_config.max_register = resource_size(res) - 1; + + regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(regmap); + } + econfig.dev = dev; + nvmem = nvmem_register(&econfig); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static const struct of_device_id qfprom_of_match[] = { + { .compatible = "qcom,qfprom",}, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, qfprom_of_match); + +static struct platform_driver qfprom_driver = { + .probe = qfprom_probe, + .remove = qfprom_remove, + .driver = { + .name = "qcom,qfprom", + .of_match_table = qfprom_of_match, + }, +}; +module_platform_driver(qfprom_driver); +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm QFPROM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c new file mode 100644 index 000000000000..14777dd5212d --- /dev/null +++ b/drivers/nvmem/sunxi_sid.c @@ -0,0 +1,171 @@ +/* + * Allwinner sunXi SoCs Security ID support. + * + * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> + * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/random.h> + + +static struct nvmem_config econfig = { + .name = "sunxi-sid", + .read_only = true, + .owner = THIS_MODULE, +}; + +struct sunxi_sid { + void __iomem *base; +}; + +/* We read the entire key, due to a 32 bit read alignment requirement. Since we + * want to return the requested byte, this results in somewhat slower code and + * uses 4 times more reads as needed but keeps code simpler. Since the SID is + * only very rarely probed, this is not really an issue. 
+ */ +static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid, + const unsigned int offset) +{ + u32 sid_key; + + sid_key = ioread32be(sid->base + round_down(offset, 4)); + sid_key >>= (offset % 4) * 8; + + return sid_key; /* Only return the last byte */ +} + +static int sunxi_sid_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct sunxi_sid *sid = context; + unsigned int offset = *(u32 *)reg; + u8 *buf = val; + + while (val_size) { + *buf++ = sunxi_sid_read_byte(sid, offset); + val_size--; + offset++; + } + + return 0; +} + +static int sunxi_sid_write(void *context, const void *data, size_t count) +{ + /* Unimplemented, dummy to keep regmap core happy */ + return 0; +} + +static struct regmap_bus sunxi_sid_bus = { + .read = sunxi_sid_read, + .write = sunxi_sid_write, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg) +{ + return false; +} + +static struct regmap_config sunxi_sid_regmap_config = { + .reg_bits = 32, + .val_bits = 8, + .reg_stride = 1, + .writeable_reg = sunxi_sid_writeable_reg, +}; + +static int sunxi_sid_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct nvmem_device *nvmem; + struct regmap *regmap; + struct sunxi_sid *sid; + int i, size; + char *randomness; + + sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); + if (!sid) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sid->base = devm_ioremap_resource(dev, res); + if (IS_ERR(sid->base)) + return PTR_ERR(sid->base); + + size = resource_size(res) - 1; + sunxi_sid_regmap_config.max_register = size; + + regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid, + &sunxi_sid_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(regmap); + } + + econfig.dev = dev; + nvmem = nvmem_register(&econfig); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); + for (i = 0; i < size; i++) + randomness[i] = sunxi_sid_read_byte(sid, i); + + add_device_randomness(randomness, size); + kfree(randomness); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static int sunxi_sid_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static const struct of_device_id sunxi_sid_of_match[] = { + { .compatible = "allwinner,sun4i-a10-sid" }, + { .compatible = "allwinner,sun7i-a20-sid" }, + {/* sentinel */}, +}; +MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); + +static struct platform_driver sunxi_sid_driver = { + .probe = sunxi_sid_probe, + .remove = sunxi_sid_remove, + .driver = { + .name = "eeprom-sunxi-sid", + .of_match_table = sunxi_sid_of_match, + }, +}; +module_platform_driver(sunxi_sid_driver); + +MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>"); +MODULE_DESCRIPTION("Allwinner sunxi security id driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 167783fa7ac1..72c933375233 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -666,9 +666,8 @@ static int ds1374_remove(struct i2c_client *client) #ifdef CONFIG_RTC_DRV_DS1374_WDT int res; - res = misc_deregister(&ds1374_miscdev); - if (!res) - ds1374_miscdev.parent = NULL; + misc_deregister(&ds1374_miscdev); + ds1374_miscdev.parent = NULL; 
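For context, a minimal consumer-side sketch of the nvmem cell interface added above (nvmem_cell_get()/nvmem_cell_read()/nvmem_cell_put()). The client device, the cell name "mac-address" and the surrounding function are illustrative assumptions, not part of this series; error handling is abbreviated.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical client: read a 6-byte cell named "mac-address" for @dev. */
static int example_get_mac(struct device *dev, u8 mac[6])
{
        struct nvmem_cell *cell;
        void *data;
        size_t len;

        cell = nvmem_cell_get(dev, "mac-address");
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        /* nvmem_cell_read() returns an allocated buffer; free it with kfree(). */
        data = nvmem_cell_read(cell, &len);
        nvmem_cell_put(cell);
        if (IS_ERR(data))
                return PTR_ERR(data);

        if (len < 6) {
                kfree(data);
                return -EINVAL;
        }

        memcpy(mac, data, 6);
        kfree(data);
        return 0;
}

The devm_nvmem_cell_get()/devm_nvmem_cell_put() variants added in the same hunks tie the cell's lifetime to the consumer device, so the explicit put can be dropped when that is more convenient.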
unregister_reboot_notifier(&ds1374_wdt_notifier); #endif diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig index 982580af1d16..0d3b70b3bda8 100644 --- a/drivers/spmi/Kconfig +++ b/drivers/spmi/Kconfig @@ -12,7 +12,7 @@ if SPMI config SPMI_MSM_PMIC_ARB tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)" - depends on IRQ_DOMAIN + select IRQ_DOMAIN depends on ARCH_QCOM || COMPILE_TEST depends on HAS_IOMEM default ARCH_QCOM diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index d7119db49cfe..a4d8c043a710 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -575,6 +575,22 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) return 0; } +static int qpnpint_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + u8 irq = d->hwirq >> 8; + u8 status = 0; + + if (which != IRQCHIP_STATE_LINE_LEVEL) + return -EINVAL; + + qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1); + *state = !!(status & BIT(irq)); + + return 0; +} + static struct irq_chip pmic_arb_irqchip = { .name = "pmic_arb", .irq_enable = qpnpint_irq_enable, @@ -582,6 +598,7 @@ static struct irq_chip pmic_arb_irqchip = { .irq_mask = qpnpint_irq_mask, .irq_unmask = qpnpint_irq_unmask, .irq_set_type = qpnpint_irq_set_type, + .irq_get_irqchip_state = qpnpint_get_irqchip_state, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index 94938436aef9..11467e17bdd8 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -22,6 +22,8 @@ #include <linux/pm_runtime.h> #include <dt-bindings/spmi/spmi.h> +#define CREATE_TRACE_POINTS +#include <trace/events/spmi.h> static DEFINE_IDA(ctrl_ida); @@ -96,28 +98,42 @@ EXPORT_SYMBOL_GPL(spmi_device_remove); static inline int spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid) { + int ret; + if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type) return -EINVAL; - return ctrl->cmd(ctrl, opcode, sid); + ret = ctrl->cmd(ctrl, opcode, sid); + trace_spmi_cmd(opcode, sid, ret); + return ret; } static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid, u16 addr, u8 *buf, size_t len) { + int ret; + if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type) return -EINVAL; - return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len); + trace_spmi_read_begin(opcode, sid, addr); + ret = ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len); + trace_spmi_read_end(opcode, sid, addr, ret, len, buf); + return ret; } static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid, u16 addr, const u8 *buf, size_t len) { + int ret; + if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type) return -EINVAL; - return ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len); + trace_spmi_write_begin(opcode, sid, addr, len, buf); + ret = ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len); + trace_spmi_write_end(opcode, sid, addr, ret); + return ret; } /** diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index c5c037ccf32c..2c75d90b26c8 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -863,14 +863,9 @@ static int __init ashmem_init(void) static void __exit ashmem_exit(void) { - int ret; - unregister_shrinker(&ashmem_shrinker); - ret = misc_deregister(&ashmem_misc); - if (unlikely(ret)) - pr_err("failed to unregister misc device!\n"); - + misc_deregister(&ashmem_misc); kmem_cache_destroy(ashmem_range_cachep); 
kmem_cache_destroy(ashmem_area_cachep); diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c index 7d6e6b6bc894..b8dcf5a26cc4 100644 --- a/drivers/staging/android/ion/ion_test.c +++ b/drivers/staging/android/ion/ion_test.c @@ -269,7 +269,8 @@ static int ion_test_remove(struct platform_device *pdev) if (!testdev) return -ENODATA; - return misc_deregister(&testdev->misc); + misc_deregister(&testdev->misc); + return 0; } static struct platform_device *ion_test_pdev; diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c index e60b2e9b9194..e7074006e41b 100644 --- a/drivers/staging/lustre/lustre/libcfs/module.c +++ b/drivers/staging/lustre/lustre/libcfs/module.c @@ -467,9 +467,7 @@ static void exit_libcfs_module(void) cfs_crypto_unregister(); cfs_wi_shutdown(); - rc = misc_deregister(&libcfs_dev); - if (rc) - CERROR("misc_deregister error %d\n", rc); + misc_deregister(&libcfs_dev); cfs_cpu_fini(); diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig index 48fb1d983f6c..52c98ce1b6fe 100644 --- a/drivers/uio/Kconfig +++ b/drivers/uio/Kconfig @@ -127,7 +127,7 @@ config UIO_FSL_ELBC_GPCM_NETX5152 config UIO_PRUSS tristate "Texas Instruments PRUSS driver" select GENERIC_ALLOCATOR - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA help PRUSS driver for OMAPL138/DA850/AM18XX devices PRUSS driver requires user space components, examples and user space diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 3257d4220d01..8196581f54c2 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -896,6 +896,7 @@ static int __init uio_init(void) static void __exit uio_exit(void) { release_uio_class(); + idr_destroy(&uio_idr); } module_init(uio_init) diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c index b6cac91c2ced..2bcf80c159c1 100644 --- a/drivers/uio/uio_fsl_elbc_gpcm.c +++ b/drivers/uio/uio_fsl_elbc_gpcm.c @@ -480,19 +480,7 @@ static struct platform_driver uio_fsl_elbc_gpcm_driver = { .probe = uio_fsl_elbc_gpcm_probe, .remove = uio_fsl_elbc_gpcm_remove, }; - -static int __init uio_fsl_elbc_gpcm_init(void) -{ - return platform_driver_register(&uio_fsl_elbc_gpcm_driver); -} - -static void __exit uio_fsl_elbc_gpcm_exit(void) -{ - platform_driver_unregister(&uio_fsl_elbc_gpcm_driver); -} - -module_init(uio_fsl_elbc_gpcm_init); -module_exit(uio_fsl_elbc_gpcm_exit); +module_platform_driver(uio_fsl_elbc_gpcm_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Ogness <john.ogness@linutronix.de>"); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index dfcc02c93648..f114a9dbb48f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1573,9 +1573,9 @@ static int __init vhost_scsi_register(void) return misc_register(&vhost_scsi_misc); } -static int vhost_scsi_deregister(void) +static void vhost_scsi_deregister(void) { - return misc_deregister(&vhost_scsi_misc); + misc_deregister(&vhost_scsi_misc); } static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport) diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 807ee22ef229..e2451bdb4525 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -213,7 +213,7 @@ struct synthvid_msg { struct hvfb_par { struct fb_info *info; - struct resource mem; + struct resource *mem; bool fb_ready; /* fb device is ready */ struct completion wait; u32 synthvid_version; @@ -677,26 +677,18 @@ static void hvfb_get_option(struct fb_info *info) /* Get framebuffer memory 
from Hyper-V video pci space */ -static int hvfb_getmem(struct fb_info *info) +static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) { struct hvfb_par *par = info->par; struct pci_dev *pdev = NULL; void __iomem *fb_virt; int gen2vm = efi_enabled(EFI_BOOT); + resource_size_t pot_start, pot_end; int ret; - par->mem.name = KBUILD_MODNAME; - par->mem.flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (gen2vm) { - ret = allocate_resource(&hyperv_mmio, &par->mem, - screen_fb_size, - 0, -1, - screen_fb_size, - NULL, NULL); - if (ret != 0) { - pr_err("Unable to allocate framebuffer memory\n"); - return -ENODEV; - } + pot_start = 0; + pot_end = -1; } else { pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, PCI_DEVICE_ID_HYPERV_VIDEO, NULL); @@ -709,16 +701,18 @@ static int hvfb_getmem(struct fb_info *info) pci_resource_len(pdev, 0) < screen_fb_size) goto err1; - par->mem.end = pci_resource_end(pdev, 0); - par->mem.start = par->mem.end - screen_fb_size + 1; - ret = request_resource(&pdev->resource[0], &par->mem); - if (ret != 0) { - pr_err("Unable to request framebuffer memory\n"); - goto err1; - } + pot_end = pci_resource_end(pdev, 0); + pot_start = pot_end - screen_fb_size + 1; + } + + ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end, + screen_fb_size, 0x100000, true); + if (ret != 0) { + pr_err("Unable to allocate framebuffer memory\n"); + goto err1; } - fb_virt = ioremap(par->mem.start, screen_fb_size); + fb_virt = ioremap(par->mem->start, screen_fb_size); if (!fb_virt) goto err2; @@ -736,7 +730,7 @@ static int hvfb_getmem(struct fb_info *info) info->apertures->ranges[0].size = pci_resource_len(pdev, 0); } - info->fix.smem_start = par->mem.start; + info->fix.smem_start = par->mem->start; info->fix.smem_len = screen_fb_size; info->screen_base = fb_virt; info->screen_size = screen_fb_size; @@ -749,7 +743,8 @@ static int hvfb_getmem(struct fb_info *info) err3: iounmap(fb_virt); err2: - release_resource(&par->mem); + release_mem_region(par->mem->start, screen_fb_size); + par->mem = NULL; err1: if (!gen2vm) pci_dev_put(pdev); @@ -763,7 +758,8 @@ static void hvfb_putmem(struct fb_info *info) struct hvfb_par *par = info->par; iounmap(info->screen_base); - release_resource(&par->mem); + release_mem_region(par->mem->start, screen_fb_size); + par->mem = NULL; } @@ -794,7 +790,7 @@ static int hvfb_probe(struct hv_device *hdev, goto error1; } - ret = hvfb_getmem(info); + ret = hvfb_getmem(hdev, info); if (ret) { pr_err("No memory for framebuffer\n"); goto error2; diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index a674409edfb3..b05e8fefbabd 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c @@ -97,7 +97,6 @@ MODULE_DEVICE_TABLE(i2c, ds2482_id); static struct i2c_driver ds2482_driver = { .driver = { - .owner = THIS_MODULE, .name = "ds2482", }, .probe = ds2482_probe, diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index d8667b0212d7..684bc9d861cb 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c @@ -232,16 +232,4 @@ static void matrox_w1_remove(struct pci_dev *pdev) } kfree(dev); } - -static int __init matrox_w1_init(void) -{ - return pci_register_driver(&matrox_w1_pci_driver); -} - -static void __exit matrox_w1_fini(void) -{ - pci_unregister_driver(&matrox_w1_pci_driver); -} - -module_init(matrox_w1_init); -module_exit(matrox_w1_fini); +module_pci_driver(matrox_w1_pci_driver); diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index 
41cecb55766c..9ba1153465ae 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c @@ -269,9 +269,8 @@ static int at91wdt_remove(struct platform_device *pdev) if (res) dev_warn(dev, "failed to unregister restart handler\n"); - res = misc_deregister(&at91wdt_miscdev); - if (!res) - at91wdt_miscdev.parent = NULL; + misc_deregister(&at91wdt_miscdev); + at91wdt_miscdev.parent = NULL; return res; } diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index b7ea39b455c8..1e41818a44bc 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -254,13 +254,10 @@ static int ks8695wdt_probe(struct platform_device *pdev) static int ks8695wdt_remove(struct platform_device *pdev) { - int res; - - res = misc_deregister(&ks8695wdt_miscdev); - if (!res) - ks8695wdt_miscdev.parent = NULL; + misc_deregister(&ks8695wdt_miscdev); + ks8695wdt_miscdev.parent = NULL; - return res; + return 0; } static void ks8695wdt_shutdown(struct platform_device *pdev) diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 119beb7f6017..4b541934b6c5 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -428,7 +428,8 @@ static int ts72xx_wdt_probe(struct platform_device *pdev) static int ts72xx_wdt_remove(struct platform_device *pdev) { - return misc_deregister(&ts72xx_wdt_miscdev); + misc_deregister(&ts72xx_wdt_miscdev); + return 0; } static struct platform_driver ts72xx_wdt_driver = { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index cd7ef34d2dce..6bad63379a4c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2163,8 +2163,7 @@ static int btrfs_interface_init(void) static void btrfs_interface_exit(void) { - if (misc_deregister(&btrfs_misc) < 0) - printk(KERN_INFO "BTRFS: misc_deregister failed for control device\n"); + misc_deregister(&btrfs_misc); } static void btrfs_print_info(void) diff --git a/fs/char_dev.c b/fs/char_dev.c index ea06a3d0364c..24b142569ca9 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -274,7 +274,7 @@ out2: } /** - * unregister_chrdev_region() - return a range of device numbers + * unregister_chrdev_region() - unregister a range of device numbers * @from: the first in the range of numbers to unregister * @count: the number of device numbers to unregister * diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index e0ab3a93eeff..5532f097f6da 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -509,7 +509,6 @@ int dlm_plock_init(void) void dlm_plock_exit(void) { - if (misc_deregister(&plock_dev_misc) < 0) - log_print("dlm_plock_exit: misc_deregister failed"); + misc_deregister(&plock_dev_misc); } diff --git a/fs/dlm/user.c b/fs/dlm/user.c index fb85f32e9eca..75ecc0d3bc85 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -362,18 +362,15 @@ fail: int dlm_device_deregister(struct dlm_ls *ls) { - int error; - /* The device is not registered. This happens when the lockspace was never used from userspace, or when device_create_lockspace() calls dlm_release_lockspace() after the register fails. 
*/ if (!ls->ls_device.name) return 0; - error = misc_deregister(&ls->ls_device); - if (!error) - kfree(ls->ls_device.name); - return error; + misc_deregister(&ls->ls_device); + kfree(ls->ls_device.name); + return 0; } static int device_user_purge(struct dlm_user_proc *proc, diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 2768eb1da2b8..ced70c8139f7 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -655,14 +655,7 @@ static int ocfs2_control_init(void) static void ocfs2_control_exit(void) { - int rc; - - rc = misc_deregister(&ocfs2_control_device); - if (rc) - printk(KERN_ERR - "ocfs2: Unable to deregister ocfs2_control device " - "(errno %d)\n", - -rc); + misc_deregister(&ocfs2_control_device); } static void fsdlm_lock_ast_wrapper(void *astarg) diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 3486b9082adb..c69e1b932809 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -14,6 +14,7 @@ #define _LINUX_CORESIGHT_H #include <linux/device.h> +#include <linux/sched.h> /* Peripheral id registers (0xFD0-0xFEC) */ #define CORESIGHT_PERIPHIDR4 0xfd0 @@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data( struct device *dev, struct device_node *node) { return NULL; } #endif +#ifdef CONFIG_PID_NS +static inline unsigned long +coresight_vpid_to_pid(unsigned long vpid) +{ + struct task_struct *task = NULL; + unsigned long pid = 0; + + rcu_read_lock(); + task = find_task_by_vpid(vpid); + if (task) + pid = task_pid_nr(task); + rcu_read_unlock(); + + return pid; +} +#else +static inline unsigned long +coresight_vpid_to_pid(unsigned long vpid) { return vpid; } +#endif + #endif diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 30d3a1f79450..54733d5b503e 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -977,6 +977,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, const char *mod_name); void vmbus_driver_unregister(struct hv_driver *hv_driver); +int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, + resource_size_t min, resource_size_t max, + resource_size_t size, resource_size_t align, + bool fb_overlap_ok); + /** * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device * @@ -1233,8 +1238,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); -extern struct resource hyperv_mmio; - /* * Negotiated version with the Host. */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index e804306ef5e8..b63218f68c4b 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -323,6 +323,7 @@ struct pt_regs; struct task_struct; static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } +#define kexec_in_progress false #endif /* CONFIG_KEXEC */ #endif /* !defined(__ASSEBMLY__) */ diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index a16b1f9c1aca..0962b2ca628a 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -6,6 +6,7 @@ #include <linux/mod_devicetable.h> struct mei_cl_device; +struct mei_device; typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, u32 events, void *context); @@ -17,6 +18,8 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, * Drivers for MEI devices will get an mei_cl_device pointer * when being probed and shall use it for doing ME bus I/O. 
* + * @bus_list: device on the bus list + * @bus: parent mei device * @dev: linux driver model device pointer * @me_cl: me client * @cl: mei client @@ -25,10 +28,16 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, * @event_cb: Drivers register this callback to get asynchronous ME * events (e.g. Rx buffer pending) notifications. * @event_context: event callback run context + * @events_mask: Events bit mask requested by driver. * @events: Events bitmask sent to the driver. + * + * @do_match: wheather device can be matched with a driver + * @is_added: device is already scanned * @priv_data: client private data */ struct mei_cl_device { + struct list_head bus_list; + struct mei_device *bus; struct device dev; struct mei_me_client *me_cl; @@ -38,8 +47,12 @@ struct mei_cl_device { struct work_struct event_work; mei_cl_event_cb_t event_cb; void *event_context; + unsigned long events_mask; unsigned long events; + unsigned int do_match:1; + unsigned int is_added:1; + void *priv_data; }; @@ -65,10 +78,12 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); int mei_cl_register_event_cb(struct mei_cl_device *device, + unsigned long event_mask, mei_cl_event_cb_t read_cb, void *context); #define MEI_CL_EVENT_RX 0 #define MEI_CL_EVENT_TX 1 +#define MEI_CL_EVENT_NOTIF 2 void *mei_cl_get_drvdata(const struct mei_cl_device *device); void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 819077c32690..81f6e427ba6b 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -67,7 +67,7 @@ struct miscdevice { }; extern int misc_register(struct miscdevice *misc); -extern int misc_deregister(struct miscdevice *misc); +extern void misc_deregister(struct miscdevice *misc); #define MODULE_ALIAS_MISCDEV(minor) \ MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \ diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h new file mode 100644 index 000000000000..9bb77d3ed6e0 --- /dev/null +++ b/include/linux/nvmem-consumer.h @@ -0,0 +1,157 @@ +/* + * nvmem framework consumer. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef _LINUX_NVMEM_CONSUMER_H +#define _LINUX_NVMEM_CONSUMER_H + +struct device; +struct device_node; +/* consumer cookie */ +struct nvmem_cell; +struct nvmem_device; + +struct nvmem_cell_info { + const char *name; + unsigned int offset; + unsigned int bytes; + unsigned int bit_offset; + unsigned int nbits; +}; + +#if IS_ENABLED(CONFIG_NVMEM) + +/* Cell based interface */ +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); +void nvmem_cell_put(struct nvmem_cell *cell); +void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); +void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); +int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); + +/* direct nvmem device read/write interface */ +struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); +struct nvmem_device *devm_nvmem_device_get(struct device *dev, + const char *name); +void nvmem_device_put(struct nvmem_device *nvmem); +void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem); +int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset, + size_t bytes, void *buf); +int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset, + size_t bytes, void *buf); +ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); +int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, void *buf); + +#else + +static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_nvmem_cell_put(struct device *dev, + struct nvmem_cell *cell) +{ + +} +static inline void nvmem_cell_put(struct nvmem_cell *cell) +{ +} + +static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int nvmem_cell_write(struct nvmem_cell *cell, + const char *buf, size_t len) +{ + return -ENOSYS; +} + +static inline struct nvmem_device *nvmem_device_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void nvmem_device_put(struct nvmem_device *nvmem) +{ +} + +static inline void devm_nvmem_device_put(struct device *dev, + struct nvmem_device *nvmem) +{ +} + +static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, + void *buf) +{ + return -ENOSYS; +} + +static inline int nvmem_device_cell_write(struct nvmem_device *nvmem, + struct nvmem_cell_info *info, + void *buf) +{ + return -ENOSYS; +} + +static inline int nvmem_device_read(struct nvmem_device *nvmem, + unsigned int offset, size_t bytes, + void *buf) +{ + return -ENOSYS; +} + +static inline int nvmem_device_write(struct nvmem_device *nvmem, + unsigned int offset, size_t bytes, + void *buf) +{ + return -ENOSYS; +} +#endif /* CONFIG_NVMEM */ + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) +struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name); +struct nvmem_device *of_nvmem_device_get(struct device_node *np, + const char *name); +#else +static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, + const char *name) +{ 
+ return ERR_PTR(-ENOSYS); +} + +static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, + const char *name) +{ + return ERR_PTR(-ENOSYS); +} +#endif /* CONFIG_NVMEM && CONFIG_OF */ + +#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h new file mode 100644 index 000000000000..0b68caff1b3c --- /dev/null +++ b/include/linux/nvmem-provider.h @@ -0,0 +1,47 @@ +/* + * nvmem framework provider. + * + * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> + * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _LINUX_NVMEM_PROVIDER_H +#define _LINUX_NVMEM_PROVIDER_H + +struct nvmem_device; +struct nvmem_cell_info; + +struct nvmem_config { + struct device *dev; + const char *name; + int id; + struct module *owner; + const struct nvmem_cell_info *cells; + int ncells; + bool read_only; +}; + +#if IS_ENABLED(CONFIG_NVMEM) + +struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); +int nvmem_unregister(struct nvmem_device *nvmem); + +#else + +static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int nvmem_unregister(struct nvmem_device *nvmem) +{ + return -ENOSYS; +} + +#endif /* CONFIG_NVMEM */ + +#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index c78dcfeaf25f..d4217eff489f 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -86,7 +86,6 @@ struct st_proto_s { extern long st_register(struct st_proto_s *); extern long st_unregister(struct st_proto_s *); -extern struct ti_st_plat_data *dt_pdata; /* * header information used by st_core.c diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h new file mode 100644 index 000000000000..62f005ef4c7e --- /dev/null +++ b/include/trace/events/spmi.h @@ -0,0 +1,135 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM spmi + +#if !defined(_TRACE_SPMI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SPMI_H + +#include <linux/spmi.h> +#include <linux/tracepoint.h> + +/* + * drivers/spmi/spmi.c + */ + +TRACE_EVENT(spmi_write_begin, + TP_PROTO(u8 opcode, u8 sid, u16 addr, u8 len, const u8 *buf), + TP_ARGS(opcode, sid, addr, len, buf), + + TP_STRUCT__entry( + __field ( u8, opcode ) + __field ( u8, sid ) + __field ( u16, addr ) + __field ( u8, len ) + __dynamic_array ( u8, buf, len + 1 ) + ), + + TP_fast_assign( + __entry->opcode = opcode; + __entry->sid = sid; + __entry->addr = addr; + __entry->len = len + 1; + memcpy(__get_dynamic_array(buf), buf, len + 1); + ), + + TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]", + (int)__entry->opcode, (int)__entry->sid, + (int)__entry->addr, (int)__entry->len, + (int)__entry->len, __get_dynamic_array(buf)) +); + +TRACE_EVENT(spmi_write_end, + TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret), + TP_ARGS(opcode, sid, addr, ret), + + TP_STRUCT__entry( + __field ( u8, opcode ) + __field ( u8, sid ) + __field ( u16, addr ) + __field ( int, ret ) + ), + + TP_fast_assign( + __entry->opcode = opcode; + __entry->sid = sid; + __entry->addr = addr; + __entry->ret = ret; + ), + + TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d", + (int)__entry->opcode, (int)__entry->sid, + (int)__entry->addr, 
__entry->ret) +); + +TRACE_EVENT(spmi_read_begin, + TP_PROTO(u8 opcode, u8 sid, u16 addr), + TP_ARGS(opcode, sid, addr), + + TP_STRUCT__entry( + __field ( u8, opcode ) + __field ( u8, sid ) + __field ( u16, addr ) + ), + + TP_fast_assign( + __entry->opcode = opcode; + __entry->sid = sid; + __entry->addr = addr; + ), + + TP_printk("opc=%d sid=%02d addr=0x%04x", + (int)__entry->opcode, (int)__entry->sid, + (int)__entry->addr) +); + +TRACE_EVENT(spmi_read_end, + TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret, u8 len, const u8 *buf), + TP_ARGS(opcode, sid, addr, ret, len, buf), + + TP_STRUCT__entry( + __field ( u8, opcode ) + __field ( u8, sid ) + __field ( u16, addr ) + __field ( int, ret ) + __field ( u8, len ) + __dynamic_array ( u8, buf, len + 1 ) + ), + + TP_fast_assign( + __entry->opcode = opcode; + __entry->sid = sid; + __entry->addr = addr; + __entry->ret = ret; + __entry->len = len + 1; + memcpy(__get_dynamic_array(buf), buf, len + 1); + ), + + TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]", + (int)__entry->opcode, (int)__entry->sid, + (int)__entry->addr, __entry->ret, (int)__entry->len, + (int)__entry->len, __get_dynamic_array(buf)) +); + +TRACE_EVENT(spmi_cmd, + TP_PROTO(u8 opcode, u8 sid, int ret), + TP_ARGS(opcode, sid, ret), + + TP_STRUCT__entry( + __field ( u8, opcode ) + __field ( u8, sid ) + __field ( int, ret ) + ), + + TP_fast_assign( + __entry->opcode = opcode; + __entry->sid = sid; + __entry->ret = ret; + ), + + TP_printk("opc=%d sid=%02d ret=%d", (int)__entry->opcode, + (int)__entry->sid, ret) +); + +#endif /* _TRACE_SPMI_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h index bc0d8b69c49e..7c3b64f6a215 100644 --- a/include/uapi/linux/mei.h +++ b/include/uapi/linux/mei.h @@ -107,4 +107,23 @@ struct mei_connect_client_data { }; }; +/** + * DOC: set and unset event notification for a connected client + * + * The IOCTL argument is 1 for enabling event notification and 0 for + * disabling the service + * Return: -EOPNOTSUPP if the devices doesn't support the feature + */ +#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32) + +/** + * DOC: retrieve notification + * + * The IOCTL output argument is 1 if an event was is pending and 0 otherwise + * the ioctl has to be called in order to acknowledge pending event + * + * Return: -EOPNOTSUPP if the devices doesn't support the feature + */ +#define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32) + #endif /* _LINUX_MEI_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 5644ec5582b9..718ea76b78ba 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -191,17 +191,18 @@ void cpu_hotplug_done(void) void cpu_hotplug_disable(void) { cpu_maps_update_begin(); - cpu_hotplug_disabled = 1; + cpu_hotplug_disabled++; cpu_maps_update_done(); } +EXPORT_SYMBOL_GPL(cpu_hotplug_disable); void cpu_hotplug_enable(void) { cpu_maps_update_begin(); - cpu_hotplug_disabled = 0; + WARN_ON(--cpu_hotplug_disabled < 0); cpu_maps_update_done(); } - +EXPORT_SYMBOL_GPL(cpu_hotplug_enable); #endif /* CONFIG_HOTPLUG_CPU */ /* Need to know about CPUs going up/down? */ @@ -608,13 +609,18 @@ int disable_nonboot_cpus(void) } } - if (!error) { + if (!error) BUG_ON(num_online_cpus() > 1); - /* Make sure the CPUs won't be enabled by someone else */ - cpu_hotplug_disabled = 1; - } else { + else pr_err("Non-boot CPUs are not disabled\n"); - } + + /* + * Make sure the CPUs won't be enabled by someone else. 
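Returning to the IOCTL_MEI_NOTIFY_SET/IOCTL_MEI_NOTIFY_GET definitions added to include/uapi/linux/mei.h above, a minimal user-space sketch of the intended call sequence; the already-connected /dev/mei file descriptor and the surrounding error handling are assumptions, not part of this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

/* Assumes @fd is a /dev/mei descriptor already connected to a client.
 * Returns 1 if an event is pending, 0 if not, or -1 on error (errno is
 * EOPNOTSUPP when the device does not support the notification feature). */
static int mei_notify_poll(int fd)
{
        uint32_t enable = 1;
        uint32_t pending = 0;

        if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
                return -1;

        if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) < 0)
                return -1;

        return pending ? 1 : 0;
}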
We need to do + * this even in case of failure as all disable_nonboot_cpus() users are + * supposed to do enable_nonboot_cpus() on the failure path. + */ + cpu_hotplug_disabled++; + cpu_maps_update_done(); return error; } @@ -633,7 +639,7 @@ void __ref enable_nonboot_cpus(void) /* Allow everyone to use the CPU hotplug again */ cpu_maps_update_begin(); - cpu_hotplug_disabled = 0; + WARN_ON(--cpu_hotplug_disabled < 0); if (cpumask_empty(frozen_cpus)) goto out; diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index c89fdcaf06e8..2f4b7ffd5570 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -2,7 +2,7 @@ """Find Kconfig symbols that are referenced but not defined.""" -# (c) 2014-2015 Valentin Rothberg <Valentin.Rothberg@lip6.fr> +# (c) 2014-2015 Valentin Rothberg <valentinrothberg@gmail.com> # (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de> # # Licensed under the terms of the GNU GPL License version 2 @@ -20,18 +20,20 @@ OPERATORS = r"&|\(|\)|\||\!" FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}" DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*" EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+" -STMT = r"^\s*(?:if|select|depends\s+on)\s+" + EXPR +DEFAULT = r"default\s+.*?(?:if\s.+){,1}" +STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")" # regex objects REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") -REGEX_FEATURE = re.compile(r"(" + FEATURE + r")") +REGEX_FEATURE = re.compile(r'(?!\B"[^"]*)' + FEATURE + r'(?![^"]*"\B)') REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE) REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) REGEX_KCONFIG_STMT = re.compile(STMT) REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$") REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$") +REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") def parse_options(): @@ -58,6 +60,11 @@ def parse_options(): "input format bases on Git log's " "\'commmit1..commit2\'.") + parser.add_option('-f', '--find', dest='find', action='store_true', + default=False, + help="Find and show commits that may cause symbols to be " + "missing. Required to run with --diff.") + parser.add_option('-i', '--ignore', dest='ignore', action='store', default="", help="Ignore files matching this pattern. 
Note that " @@ -86,6 +93,9 @@ def parse_options(): "'--force' if you\nwant to ignore this warning and " "continue.") + if opts.commit: + opts.find = False + if opts.ignore: try: re.match(opts.ignore, "this/is/just/a/test.c") @@ -128,13 +138,19 @@ def main(): # feature has not been undefined before if not feature in undefined_a: files = sorted(undefined_b.get(feature)) - print "%s\t%s" % (feature, ", ".join(files)) + print "%s\t%s" % (yel(feature), ", ".join(files)) + if opts.find: + commits = find_commits(feature, opts.diff) + print red(commits) # check if there are new files that reference the undefined feature else: files = sorted(undefined_b.get(feature) - undefined_a.get(feature)) if files: - print "%s\t%s" % (feature, ", ".join(files)) + print "%s\t%s" % (yel(feature), ", ".join(files)) + if opts.find: + commits = find_commits(feature, opts.diff) + print red(commits) # reset to head execute("git reset --hard %s" % head) @@ -144,7 +160,21 @@ def main(): undefined = check_symbols(opts.ignore) for feature in sorted(undefined): files = sorted(undefined.get(feature)) - print "%s\t%s" % (feature, ", ".join(files)) + print "%s\t%s" % (yel(feature), ", ".join(files)) + + +def yel(string): + """ + Color %string yellow. + """ + return "\033[33m%s\033[0m" % string + + +def red(string): + """ + Color %string red. + """ + return "\033[31m%s\033[0m" % string def execute(cmd): @@ -156,6 +186,13 @@ def execute(cmd): return stdout +def find_commits(symbol, diff): + """Find commits changing %symbol in the given range of %diff.""" + commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s" + % (symbol, diff)) + return commits + + def tree_is_dirty(): """Return true if the current working tree is dirty (i.e., if any file has been added, deleted, modified, renamed or copied but not committed).""" @@ -279,6 +316,9 @@ def parse_kconfig_file(kfile, defined_features, referenced_features): line = line.strip('\n') features.extend(get_features_in_line(line)) for feature in set(features): + if REGEX_NUMERIC.match(feature): + # ignore numeric values + continue paths = referenced_features.get(feature, set()) paths.add(kfile) referenced_features[feature] = paths diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus new file mode 100644 index 000000000000..162a3784d80e --- /dev/null +++ b/tools/hv/lsvmbus @@ -0,0 +1,101 @@ +#!/usr/bin/env python + +import os +from optparse import OptionParser + +parser = OptionParser() +parser.add_option("-v", "--verbose", dest="verbose", + help="print verbose messages. Try -vv, -vvv for \ + more verbose messages", action="count") + +(options, args) = parser.parse_args() + +verbose = 0 +if options.verbose is not None: + verbose = options.verbose + +vmbus_sys_path = '/sys/bus/vmbus/devices' +if not os.path.isdir(vmbus_sys_path): + print "%s doesn't exist: exiting..." 
% vmbus_sys_path + exit(-1) + +vmbus_dev_dict = { + '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]', + '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]', + '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]', + '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]', + '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]', + '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]', + '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]', + '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse', + '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard', + '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter', + '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter', + '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller', + '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', + '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', + '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', + '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', + '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', + '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', +} + +def get_vmbus_dev_attr(dev_name, attr): + try: + f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r') + lines = f.readlines() + f.close() + except IOError: + lines = [] + + return lines + +class VMBus_Dev: + pass + + +vmbus_dev_list = [] + +for f in os.listdir(vmbus_sys_path): + vmbus_id = get_vmbus_dev_attr(f, 'id')[0].strip() + class_id = get_vmbus_dev_attr(f, 'class_id')[0].strip() + device_id = get_vmbus_dev_attr(f, 'device_id')[0].strip() + dev_desc = vmbus_dev_dict.get(class_id, 'Unknown') + + chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping') + chn_vp_mapping = [c.strip() for c in chn_vp_mapping] + chn_vp_mapping = sorted(chn_vp_mapping, + key = lambda c : int(c.split(':')[0])) + + chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' % + (c.split(':')[0], c.split(':')[1]) + for c in chn_vp_mapping] + d = VMBus_Dev() + d.sysfs_path = '%s/%s' % (vmbus_sys_path, f) + d.vmbus_id = vmbus_id + d.class_id = class_id + d.device_id = device_id + d.dev_desc = dev_desc + d.chn_vp_mapping = '\n'.join(chn_vp_mapping) + if d.chn_vp_mapping: + d.chn_vp_mapping += '\n' + + vmbus_dev_list.append(d) + + +vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id)) + +format0 = '%2s: %s' +format1 = '%2s: Class_ID = %s - %s\n%s' +format2 = '%2s: Class_ID = %s - %s\n\tDevice_ID = %s\n\tSysfs path: %s\n%s' + +for d in vmbus_dev_list: + if verbose == 0: + print ('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc) + elif verbose == 1: + print ('VMBUS ID ' + format1) % \ + (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping) + else: + print ('VMBUS ID ' + format2) % \ + (d.vmbus_id, d.class_id, d.dev_desc, \ + d.device_id, d.sysfs_path, d.chn_vp_mapping) |
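As a closing illustration of the nvmem device-level interface documented earlier in this series (nvmem_device_get(), nvmem_device_cell_read(), struct nvmem_cell_info), a sketch of reading a sub-byte field; the provider name "example-efuse", the offsets and the calling driver are assumptions, not part of this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/types.h>

/* Hypothetical: read a 4-bit revision field starting at bit 2 of byte 0x4
 * of a provider registered as "example-efuse". The bit shifting and masking
 * are handled by the core (nvmem_shift_read_buffer_in_place() above). */
static int example_read_soc_rev(struct device *dev, u8 *rev)
{
        struct nvmem_cell_info info = {
                .name       = "soc-rev",
                .offset     = 0x4,      /* byte offset within the provider */
                .bytes      = 1,
                .bit_offset = 2,
                .nbits      = 4,
        };
        struct nvmem_device *nvmem;
        ssize_t ret;

        nvmem = nvmem_device_get(dev, "example-efuse");
        if (IS_ERR(nvmem))
                return PTR_ERR(nvmem);

        ret = nvmem_device_cell_read(nvmem, &info, rev);
        nvmem_device_put(nvmem);

        return ret < 0 ? ret : 0;
}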