Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                    |   2
-rw-r--r--  drivers/base/attribute_container.c      |   2
-rw-r--r--  drivers/base/core.c                     | 146
-rw-r--r--  drivers/base/cpu.c                      | 136
-rw-r--r--  drivers/base/firmware_class.c           | 132
-rw-r--r--  drivers/base/memory.c                   | 257
-rw-r--r--  drivers/base/pinctrl.c                  |  19
-rw-r--r--  drivers/base/platform.c                 |  12
-rw-r--r--  drivers/base/power/domain.c             |   1
-rw-r--r--  drivers/base/power/generic_ops.c        |  23
-rw-r--r--  drivers/base/power/opp.c                |   4
-rw-r--r--  drivers/base/power/qos.c                |   6
-rw-r--r--  drivers/base/power/runtime.c            |  12
-rw-r--r--  drivers/base/power/wakeup.c             |   9
-rw-r--r--  drivers/base/regmap/internal.h          |  10
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c   |  62
-rw-r--r--  drivers/base/regmap/regcache.c          |  83
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c    |   8
-rw-r--r--  drivers/base/regmap/regmap.c            | 156
19 files changed, 776 insertions, 304 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 07abd9d76f7f..5daa2599ed48 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -2,7 +2,6 @@ menu "Generic Driver Options"
config UEVENT_HELPER_PATH
string "path to uevent helper"
- depends on HOTPLUG
default ""
help
Path to uevent helper program forked by the kernel for
@@ -23,7 +22,6 @@ config UEVENT_HELPER_PATH
config DEVTMPFS
bool "Maintain a devtmpfs filesystem to mount at /dev"
- depends on HOTPLUG
help
This creates a tmpfs/ramfs filesystem instance early at bootup.
In this filesystem, the kernel driver core maintains device
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index d78b204e65c1..ecc1929d7f6a 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
ic->classdev.parent = get_device(dev);
ic->classdev.class = cont->class;
cont->class->dev_release = attribute_container_release;
- dev_set_name(&ic->classdev, dev_name(dev));
+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
if (fn)
fn(cont, dev, &ic->classdev);
else
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2499cefdcdf2..dc3ea237f086 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -193,12 +193,12 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
EXPORT_SYMBOL_GPL(device_show_bool);
/**
- * device_release - free device structure.
- * @kobj: device's kobject.
+ * device_release - free device structure.
+ * @kobj: device's kobject.
*
- * This is called once the reference count for the object
- * reaches 0. We forward the call to the device's release
- * method, which should handle actually freeing the structure.
+ * This is called once the reference count for the object
+ * reaches 0. We forward the call to the device's release
+ * method, which should handle actually freeing the structure.
*/
static void device_release(struct kobject *kobj)
{
@@ -403,6 +403,36 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
static struct device_attribute uevent_attr =
__ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);
+static ssize_t show_online(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ bool val;
+
+ lock_device_hotplug();
+ val = !dev->offline;
+ unlock_device_hotplug();
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t store_online(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ lock_device_hotplug();
+ ret = val ? device_online(dev) : device_offline(dev);
+ unlock_device_hotplug();
+ return ret < 0 ? ret : count;
+}
+
+static struct device_attribute online_attr =
+ __ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);
+
static int device_add_attributes(struct device *dev,
struct device_attribute *attrs)
{
@@ -516,6 +546,12 @@ static int device_add_attrs(struct device *dev)
if (error)
goto err_remove_type_groups;
+ if (device_supports_offline(dev) && !dev->offline_disabled) {
+ error = device_create_file(dev, &online_attr);
+ if (error)
+ goto err_remove_type_groups;
+ }
+
return 0;
err_remove_type_groups:
@@ -536,6 +572,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &online_attr);
device_remove_groups(dev, dev->groups);
if (type)
@@ -1334,8 +1371,8 @@ const char *device_get_devnode(struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @data: data for the callback.
* @fn: function to be called for each device.
+ * @data: data for the callback.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -1363,8 +1400,8 @@ int device_for_each_child(struct device *parent, void *data,
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @data: Data to pass to match function
* @match: Callback function to check device
+ * @data: Data to pass to match function
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
@@ -1374,6 +1411,8 @@ int device_for_each_child(struct device *parent, void *data,
* if it does. If the callback returns non-zero and a reference to the
* current device can be obtained, this function will return to the caller
* and not iterate over any more devices.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
*/
struct device *device_find_child(struct device *parent, void *data,
int (*match)(struct device *dev, void *data))
@@ -1433,6 +1472,99 @@ EXPORT_SYMBOL_GPL(put_device);
EXPORT_SYMBOL_GPL(device_create_file);
EXPORT_SYMBOL_GPL(device_remove_file);
+static DEFINE_MUTEX(device_hotplug_lock);
+
+void lock_device_hotplug(void)
+{
+ mutex_lock(&device_hotplug_lock);
+}
+
+void unlock_device_hotplug(void)
+{
+ mutex_unlock(&device_hotplug_lock);
+}
+
+static int device_check_offline(struct device *dev, void *not_used)
+{
+ int ret;
+
+ ret = device_for_each_child(dev, NULL, device_check_offline);
+ if (ret)
+ return ret;
+
+ return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
+}
+
+/**
+ * device_offline - Prepare the device for hot-removal.
+ * @dev: Device to be put offline.
+ *
+ * Execute the device bus type's .offline() callback, if present, to prepare
+ * the device for a subsequent hot-removal. If that succeeds, the device must
+ * not be used until either it is removed or its bus type's .online() callback
+ * is executed.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_offline(struct device *dev)
+{
+ int ret;
+
+ if (dev->offline_disabled)
+ return -EPERM;
+
+ ret = device_for_each_child(dev, NULL, device_check_offline);
+ if (ret)
+ return ret;
+
+ device_lock(dev);
+ if (device_supports_offline(dev)) {
+ if (dev->offline) {
+ ret = 1;
+ } else {
+ ret = dev->bus->offline(dev);
+ if (!ret) {
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ dev->offline = true;
+ }
+ }
+ }
+ device_unlock(dev);
+
+ return ret;
+}
+
+/**
+ * device_online - Put the device back online after successful device_offline().
+ * @dev: Device to be put back online.
+ *
+ * If device_offline() has been successfully executed for @dev, but the device
+ * has not been removed subsequently, execute its bus type's .online() callback
+ * to indicate that the device can be used again.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_online(struct device *dev)
+{
+ int ret = 0;
+
+ device_lock(dev);
+ if (device_supports_offline(dev)) {
+ if (dev->offline) {
+ ret = dev->bus->online(dev);
+ if (!ret) {
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ dev->offline = false;
+ }
+ } else {
+ ret = 1;
+ }
+ }
+ device_unlock(dev);
+
+ return ret;
+}
+
struct root_device {
struct device dev;
struct module *owner;
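
The hunks above add a generic "online" sysfs attribute together with device_online()/device_offline(), driven by new bus_type ->online()/->offline() callbacks and serialized by device_hotplug_lock. As a hedged illustration (not part of the patch; all foo_* names are invented), a bus would opt in roughly like this, assuming the bus_type fields added by the companion header change:

#include <linux/device.h>

static int foo_bus_online(struct device *dev)
{
        /* undo a previous offline; return 0 on success */
        return 0;
}

static int foo_bus_offline(struct device *dev)
{
        /* quiesce the device so it can be hot-removed; 0 on success */
        return 0;
}

static struct bus_type foo_bus_type = {
        .name    = "foo",
        .online  = foo_bus_online,
        .offline = foo_bus_offline,
};

With that in place the core creates an "online" file for every device on the bus (unless dev->offline_disabled is set); writing 0 or 1 to it ends up in device_offline() or device_online() under device_hotplug_lock, and reading it reports !dev->offline.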
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 3d48fc887ef4..a16d20e389f0 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -13,17 +13,21 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
+#include <linux/acpi.h>
#include "base.h"
-struct bus_type cpu_subsys = {
- .name = "cpu",
- .dev_name = "cpu",
-};
-EXPORT_SYMBOL_GPL(cpu_subsys);
-
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
+static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+{
+ /* ACPI style match is the only one that may succeed. */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
unsigned int from_nid, unsigned int to_nid)
@@ -34,69 +38,45 @@ static void change_cpu_under_node(struct cpu *cpu,
cpu->node_id = to_nid;
}
-static ssize_t show_online(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int __ref cpu_subsys_online(struct device *dev)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = dev->id;
+ int from_nid, to_nid;
+ int ret;
+
+ cpu_hotplug_driver_lock();
+
+ from_nid = cpu_to_node(cpuid);
+ ret = cpu_up(cpuid);
+ /*
+ * When hot adding memory to memoryless node and enabling a cpu
+ * on the node, node number of the cpu may internally change.
+ */
+ to_nid = cpu_to_node(cpuid);
+ if (from_nid != to_nid)
+ change_cpu_under_node(cpu, from_nid, to_nid);
- return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
+ cpu_hotplug_driver_unlock();
+ return ret;
}
-static ssize_t __ref store_online(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int cpu_subsys_offline(struct device *dev)
{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int cpuid = cpu->dev.id;
- int from_nid, to_nid;
- ssize_t ret;
+ int ret;
cpu_hotplug_driver_lock();
- switch (buf[0]) {
- case '0':
- ret = cpu_down(cpuid);
- if (!ret)
- kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
- break;
- case '1':
- from_nid = cpu_to_node(cpuid);
- ret = cpu_up(cpuid);
-
- /*
- * When hot adding memory to memoryless node and enabling a cpu
- * on the node, node number of the cpu may internally change.
- */
- to_nid = cpu_to_node(cpuid);
- if (from_nid != to_nid)
- change_cpu_under_node(cpu, from_nid, to_nid);
-
- if (!ret)
- kobject_uevent(&dev->kobj, KOBJ_ONLINE);
- break;
- default:
- ret = -EINVAL;
- }
+ ret = cpu_down(dev->id);
cpu_hotplug_driver_unlock();
-
- if (ret >= 0)
- ret = count;
return ret;
}
-static DEVICE_ATTR(online, 0644, show_online, store_online);
-static void __cpuinit register_cpu_control(struct cpu *cpu)
-{
- device_create_file(&cpu->dev, &dev_attr_online);
-}
void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
- device_remove_file(&cpu->dev, &dev_attr_online);
-
device_unregister(&cpu->dev);
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
@@ -122,13 +102,19 @@ static ssize_t cpu_release_store(struct device *dev,
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-static inline void register_cpu_control(struct cpu *cpu)
-{
-}
#endif /* CONFIG_HOTPLUG_CPU */
+struct bus_type cpu_subsys = {
+ .name = "cpu",
+ .dev_name = "cpu",
+ .match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+ .online = cpu_subsys_online,
+ .offline = cpu_subsys_offline,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
@@ -164,8 +150,32 @@ static ssize_t show_crash_notes_size(struct device *dev,
return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
+
+static struct attribute *crash_note_cpu_attrs[] = {
+ &dev_attr_crash_notes.attr,
+ &dev_attr_crash_notes_size.attr,
+ NULL
+};
+
+static struct attribute_group crash_note_cpu_attr_group = {
+ .attrs = crash_note_cpu_attrs,
+};
#endif
+static const struct attribute_group *common_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+ &crash_note_cpu_attr_group,
+#endif
+ NULL
+};
+
+static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+ &crash_note_cpu_attr_group,
+#endif
+ NULL
+};
+
/*
* Print cpu online, possible, present, and system maps
*/
@@ -277,24 +287,20 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
+ cpu->dev.offline_disabled = !cpu->hotpluggable;
+ cpu->dev.offline = !cpu_online(num);
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
+ cpu->dev.groups = common_cpu_attr_groups;
+ if (cpu->hotpluggable)
+ cpu->dev.groups = hotplugable_cpu_attr_groups;
error = device_register(&cpu->dev);
- if (!error && cpu->hotpluggable)
- register_cpu_control(cpu);
if (!error)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
-#ifdef CONFIG_KEXEC
- if (!error)
- error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
- if (!error)
- error = device_create_file(&cpu->dev,
- &dev_attr_crash_notes_size);
-#endif
return error;
}
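
The per-cpu crash_notes files above move from explicit device_create_file() calls to attribute groups wired up through dev->groups before device_register(), so sysfs creates and removes them together with the device. A minimal sketch of that pattern, not taken from the patch (struct foo_device, show_foo and the constant value are illustrative):

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo_device {
        struct device dev;
};

static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* placeholder value */
}
static DEVICE_ATTR(foo, 0444, show_foo, NULL);

static struct attribute *foo_dev_attrs[] = {
        &dev_attr_foo.attr,
        NULL
};

static struct attribute_group foo_dev_attr_group = {
        .attrs = foo_dev_attrs,
};

static const struct attribute_group *foo_dev_attr_groups[] = {
        &foo_dev_attr_group,
        NULL
};

static int foo_register(struct foo_device *foo)
{
        /* name, bus and release are assumed to be set up elsewhere */
        foo->dev.groups = foo_dev_attr_groups;
        return device_register(&foo->dev);
}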
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 01e21037d8fe..a439602ea919 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,7 @@
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
+#include <linux/reboot.h>
#include <generated/utsrelease.h>
@@ -127,9 +128,11 @@ struct firmware_buf {
size_t size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
bool is_paged_buf;
+ bool need_uevent;
struct page **pages;
int nr_pages;
int page_array_size;
+ struct list_head pending_list;
#endif
char fw_id[];
};
@@ -171,6 +174,9 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ INIT_LIST_HEAD(&buf->pending_list);
+#endif
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
@@ -212,18 +218,6 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
return tmp ? 0 : -ENOMEM;
}
-static struct firmware_buf *fw_lookup_buf(const char *fw_name)
-{
- struct firmware_buf *tmp;
- struct firmware_cache *fwc = &fw_cache;
-
- spin_lock(&fwc->lock);
- tmp = __fw_lookup_buf(fw_name);
- spin_unlock(&fwc->lock);
-
- return tmp;
-}
-
static void __fw_free_buf(struct kref *ref)
{
struct firmware_buf *buf = to_fwbuf(ref);
@@ -446,10 +440,8 @@ static struct firmware_priv *to_firmware_priv(struct device *dev)
return container_of(dev, struct firmware_priv, dev);
}
-static void fw_load_abort(struct firmware_priv *fw_priv)
+static void __fw_load_abort(struct firmware_buf *buf)
{
- struct firmware_buf *buf = fw_priv->buf;
-
/*
* There is a small window in which user can write to 'loading'
* between loading done and disappearance of 'loading'
@@ -457,8 +449,16 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
if (test_bit(FW_STATUS_DONE, &buf->status))
return;
+ list_del_init(&buf->pending_list);
set_bit(FW_STATUS_ABORT, &buf->status);
complete_all(&buf->completion);
+}
+
+static void fw_load_abort(struct firmware_priv *fw_priv)
+{
+ struct firmware_buf *buf = fw_priv->buf;
+
+ __fw_load_abort(buf);
/* avoid user action after loading abort */
fw_priv->buf = NULL;
@@ -467,6 +467,25 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
#define is_fw_load_aborted(buf) \
test_bit(FW_STATUS_ABORT, &(buf)->status)
+static LIST_HEAD(pending_fw_head);
+
+/* reboot notifier to avoid deadlock with usermode_lock */
+static int fw_shutdown_notify(struct notifier_block *unused1,
+ unsigned long unused2, void *unused3)
+{
+ mutex_lock(&fw_lock);
+ while (!list_empty(&pending_fw_head))
+ __fw_load_abort(list_first_entry(&pending_fw_head,
+ struct firmware_buf,
+ pending_list));
+ mutex_unlock(&fw_lock);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+ .notifier_call = fw_shutdown_notify,
+};
+
static ssize_t firmware_timeout_show(struct class *class,
struct class_attribute *attr,
char *buf)
@@ -509,8 +528,6 @@ static void fw_dev_release(struct device *dev)
struct firmware_priv *fw_priv = to_firmware_priv(dev);
kfree(fw_priv);
-
- module_put(THIS_MODULE);
}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -619,6 +636,7 @@ static ssize_t firmware_loading_store(struct device *dev,
* is completed.
* */
fw_map_pages_buf(fw_buf);
+ list_del_init(&fw_buf->pending_list);
complete_all(&fw_buf->completion);
break;
}
@@ -838,9 +856,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
dev_set_uevent_suppress(f_dev, true);
- /* Need to pin this module until class device is destroyed */
- __module_get(THIS_MODULE);
-
retval = device_add(f_dev);
if (retval) {
dev_err(f_dev, "%s: device_register failed\n", __func__);
@@ -860,6 +875,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
}
if (uevent) {
+ buf->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
@@ -868,6 +884,10 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
+ mutex_lock(&fw_lock);
+ list_add(&buf->pending_list, &pending_fw_head);
+ mutex_unlock(&fw_lock);
+
wait_for_completion(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
@@ -895,6 +915,23 @@ static int fw_load_from_user_helper(struct firmware *firmware,
fw_priv->buf = firmware->priv;
return _request_firmware_load(fw_priv, uevent, timeout);
}
+
+#ifdef CONFIG_PM_SLEEP
+/* kill pending requests without uevent to avoid blocking suspend */
+static void kill_requests_without_uevent(void)
+{
+ struct firmware_buf *buf;
+ struct firmware_buf *next;
+
+ mutex_lock(&fw_lock);
+ list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
+ if (!buf->need_uevent)
+ __fw_load_abort(buf);
+ }
+ mutex_unlock(&fw_lock);
+}
+#endif
+
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
@@ -907,6 +944,10 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
/* No abort during direct loading */
#define is_fw_load_aborted(buf) false
+#ifdef CONFIG_PM_SLEEP
+static inline void kill_requests_without_uevent(void) { }
+#endif
+
#endif /* CONFIG_FW_LOADER_USER_HELPER */
@@ -974,7 +1015,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 1; /* need to load */
}
-static int assign_firmware_buf(struct firmware *fw, struct device *device)
+static int assign_firmware_buf(struct firmware *fw, struct device *device,
+ bool skip_cache)
{
struct firmware_buf *buf = fw->priv;
@@ -991,7 +1033,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device)
* device may has been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (device)
+ if (device && !skip_cache)
fw_add_devm_name(device, buf->fw_id);
/*
@@ -1047,8 +1089,10 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (!fw_get_filesystem_firmware(device, fw->priv))
ret = fw_load_from_user_helper(fw, name, device,
uevent, nowait, timeout);
+
+ /* don't cache firmware handled without uevent */
if (!ret)
- ret = assign_firmware_buf(fw, device);
+ ret = assign_firmware_buf(fw, device, !uevent);
usermodehelper_read_unlock();
@@ -1086,8 +1130,15 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
- return _request_firmware(firmware_p, name, device, true, false);
+ int ret;
+
+ /* Need to pin this module until return */
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, true, false);
+ module_put(THIS_MODULE);
+ return ret;
}
+EXPORT_SYMBOL(request_firmware);
/**
* release_firmware: - release the resource associated with a firmware image
@@ -1101,6 +1152,7 @@ void release_firmware(const struct firmware *fw)
kfree(fw);
}
}
+EXPORT_SYMBOL(release_firmware);
/* Async support */
struct firmware_work {
@@ -1181,6 +1233,10 @@ request_firmware_nowait(
schedule_work(&fw_work->work);
return 0;
}
+EXPORT_SYMBOL(request_firmware_nowait);
+
+#ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
* cache_firmware - cache one firmware image in kernel memory space
@@ -1196,7 +1252,7 @@ request_firmware_nowait(
* Return !0 otherwise
*
*/
-int cache_firmware(const char *fw_name)
+static int cache_firmware(const char *fw_name)
{
int ret;
const struct firmware *fw;
@@ -1212,6 +1268,18 @@ int cache_firmware(const char *fw_name)
return ret;
}
+static struct firmware_buf *fw_lookup_buf(const char *fw_name)
+{
+ struct firmware_buf *tmp;
+ struct firmware_cache *fwc = &fw_cache;
+
+ spin_lock(&fwc->lock);
+ tmp = __fw_lookup_buf(fw_name);
+ spin_unlock(&fwc->lock);
+
+ return tmp;
+}
+
/**
* uncache_firmware - remove one cached firmware image
* @fw_name: the firmware image name
@@ -1223,7 +1291,7 @@ int cache_firmware(const char *fw_name)
* Return !0 otherwise
*
*/
-int uncache_firmware(const char *fw_name)
+static int uncache_firmware(const char *fw_name)
{
struct firmware_buf *buf;
struct firmware fw;
@@ -1242,9 +1310,6 @@ int uncache_firmware(const char *fw_name)
return -EINVAL;
}
-#ifdef CONFIG_PM_SLEEP
-static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
-
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
struct fw_cache_entry *fce;
@@ -1464,6 +1529,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ kill_requests_without_uevent();
device_cache_fw_images();
break;
@@ -1526,6 +1592,7 @@ static int __init firmware_class_init(void)
{
fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
+ register_reboot_notifier(&fw_shutdown_nb);
return class_register(&firmware_class);
#else
return 0;
@@ -1539,15 +1606,10 @@ static void __exit firmware_class_exit(void)
unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
+ unregister_reboot_notifier(&fw_shutdown_nb);
class_unregister(&firmware_class);
#endif
}
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);
-
-EXPORT_SYMBOL(release_firmware);
-EXPORT_SYMBOL(request_firmware);
-EXPORT_SYMBOL(request_firmware_nowait);
-EXPORT_SYMBOL_GPL(cache_firmware);
-EXPORT_SYMBOL_GPL(uncache_firmware);
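
The firmware loader changes above move the module pinning from the sysfs fallback device into request_firmware() itself, relocate the EXPORTs next to their functions, and abort pending user-helper requests on reboot or suspend; the calling convention is untouched. For reference, a typical caller still looks like this hedged sketch (the firmware name and foo_parse() are made up):

#include <linux/firmware.h>
#include <linux/device.h>

static int foo_parse(const u8 *data, size_t size)
{
        return 0;       /* stub for the sketch */
}

static int foo_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "foo/firmware.bin", dev);
        if (err)
                return err;

        /* fw->data and fw->size stay valid until release_firmware() */
        err = foo_parse(fw->data, fw->size);

        release_firmware(fw);
        return err;
}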
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f8a6954da0..2b7813ec6d02 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -37,9 +37,14 @@ static inline int base_memory_block_id(int section_nr)
return section_nr / sections_per_block;
}
+static int memory_subsys_online(struct device *dev);
+static int memory_subsys_offline(struct device *dev);
+
static struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
.dev_name = MEMORY_CLASS_NAME,
+ .online = memory_subsys_online,
+ .offline = memory_subsys_offline,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -77,22 +82,6 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
-{
- int error;
-
- memory->dev.bus = &memory_subsys;
- memory->dev.id = memory->start_section_nr / sections_per_block;
- memory->dev.release = memory_block_release;
-
- error = device_register(&memory->dev);
- return error;
-}
-
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
@@ -278,33 +267,64 @@ static int __memory_block_change_state(struct memory_block *mem,
{
int ret = 0;
- if (mem->state != from_state_req) {
- ret = -EINVAL;
- goto out;
- }
+ if (mem->state != from_state_req)
+ return -EINVAL;
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
ret = memory_block_action(mem->start_section_nr, to_state, online_type);
+ mem->state = ret ? from_state_req : to_state;
+ return ret;
+}
- if (ret) {
- mem->state = from_state_req;
- goto out;
- }
+static int memory_subsys_online(struct device *dev)
+{
+ struct memory_block *mem = container_of(dev, struct memory_block, dev);
+ int ret;
- mem->state = to_state;
- switch (mem->state) {
- case MEM_OFFLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
- break;
- case MEM_ONLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
- break;
- default:
- break;
+ mutex_lock(&mem->state_mutex);
+
+ ret = mem->state == MEM_ONLINE ? 0 :
+ __memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE,
+ ONLINE_KEEP);
+
+ mutex_unlock(&mem->state_mutex);
+ return ret;
+}
+
+static int memory_subsys_offline(struct device *dev)
+{
+ struct memory_block *mem = container_of(dev, struct memory_block, dev);
+ int ret;
+
+ mutex_lock(&mem->state_mutex);
+
+ ret = mem->state == MEM_OFFLINE ? 0 :
+ __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
+
+ mutex_unlock(&mem->state_mutex);
+ return ret;
+}
+
+static int __memory_block_change_state_uevent(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req,
+ int online_type)
+{
+ int ret = __memory_block_change_state(mem, to_state, from_state_req,
+ online_type);
+ if (!ret) {
+ switch (mem->state) {
+ case MEM_OFFLINE:
+ kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
+ break;
+ case MEM_ONLINE:
+ kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
+ break;
+ default:
+ break;
+ }
}
-out:
return ret;
}
@@ -315,8 +335,8 @@ static int memory_block_change_state(struct memory_block *mem,
int ret;
mutex_lock(&mem->state_mutex);
- ret = __memory_block_change_state(mem, to_state, from_state_req,
- online_type);
+ ret = __memory_block_change_state_uevent(mem, to_state, from_state_req,
+ online_type);
mutex_unlock(&mem->state_mutex);
return ret;
@@ -326,22 +346,34 @@ store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct memory_block *mem;
+ bool offline;
int ret = -EINVAL;
mem = container_of(dev, struct memory_block, dev);
- if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+ lock_device_hotplug();
+
+ if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) {
+ offline = false;
ret = memory_block_change_state(mem, MEM_ONLINE,
MEM_OFFLINE, ONLINE_KERNEL);
- else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+ } else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) {
+ offline = false;
ret = memory_block_change_state(mem, MEM_ONLINE,
MEM_OFFLINE, ONLINE_MOVABLE);
- else if (!strncmp(buf, "online", min_t(int, count, 6)))
+ } else if (!strncmp(buf, "online", min_t(int, count, 6))) {
+ offline = false;
ret = memory_block_change_state(mem, MEM_ONLINE,
MEM_OFFLINE, ONLINE_KEEP);
- else if(!strncmp(buf, "offline", min_t(int, count, 7)))
+ } else if(!strncmp(buf, "offline", min_t(int, count, 7))) {
+ offline = true;
ret = memory_block_change_state(mem, MEM_OFFLINE,
MEM_ONLINE, -1);
+ }
+ if (!ret)
+ dev->offline = offline;
+
+ unlock_device_hotplug();
if (ret)
return ret;
@@ -371,11 +403,6 @@ static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
-#define mem_create_simple_file(mem, attr_name) \
- device_create_file(&mem->dev, &dev_attr_##attr_name)
-#define mem_remove_simple_file(mem, attr_name) \
- device_remove_file(&mem->dev, &dev_attr_##attr_name)
-
/*
* Block size attribute stuff
*/
@@ -388,12 +415,6 @@ print_block_size(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
-static int block_size_init(void)
-{
- return device_create_file(memory_subsys.dev_root,
- &dev_attr_block_size_bytes);
-}
-
/*
* Some architectures will have custom drivers to do this, and
* will not need to do it from userspace. The fake hot-add code
@@ -429,17 +450,8 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
out:
return ret;
}
-static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
-static int memory_probe_init(void)
-{
- return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
-}
-#else
-static inline int memory_probe_init(void)
-{
- return 0;
-}
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
#endif
#ifdef CONFIG_MEMORY_FAILURE
@@ -485,23 +497,6 @@ store_hard_offline_page(struct device *dev,
static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
-
-static __init int memory_fail_init(void)
-{
- int err;
-
- err = device_create_file(memory_subsys.dev_root,
- &dev_attr_soft_offline_page);
- if (!err)
- err = device_create_file(memory_subsys.dev_root,
- &dev_attr_hard_offline_page);
- return err;
-}
-#else
-static inline int memory_fail_init(void)
-{
- return 0;
-}
#endif
/*
@@ -546,6 +541,42 @@ struct memory_block *find_memory_block(struct mem_section *section)
return find_memory_block_hinted(section, NULL);
}
+static struct attribute *memory_memblk_attrs[] = {
+ &dev_attr_phys_index.attr,
+ &dev_attr_end_phys_index.attr,
+ &dev_attr_state.attr,
+ &dev_attr_phys_device.attr,
+ &dev_attr_removable.attr,
+ NULL
+};
+
+static struct attribute_group memory_memblk_attr_group = {
+ .attrs = memory_memblk_attrs,
+};
+
+static const struct attribute_group *memory_memblk_attr_groups[] = {
+ &memory_memblk_attr_group,
+ NULL,
+};
+
+/*
+ * register_memory - Setup a sysfs device for a memory block
+ */
+static
+int register_memory(struct memory_block *memory)
+{
+ int error;
+
+ memory->dev.bus = &memory_subsys;
+ memory->dev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.release = memory_block_release;
+ memory->dev.groups = memory_memblk_attr_groups;
+ memory->dev.offline = memory->state == MEM_OFFLINE;
+
+ error = device_register(&memory->dev);
+ return error;
+}
+
static int init_memory_block(struct memory_block **memory,
struct mem_section *section, unsigned long state)
{
@@ -569,16 +600,6 @@ static int init_memory_block(struct memory_block **memory,
mem->phys_device = arch_get_memory_phys_device(start_pfn);
ret = register_memory(mem);
- if (!ret)
- ret = mem_create_simple_file(mem, phys_index);
- if (!ret)
- ret = mem_create_simple_file(mem, end_phys_index);
- if (!ret)
- ret = mem_create_simple_file(mem, state);
- if (!ret)
- ret = mem_create_simple_file(mem, phys_device);
- if (!ret)
- ret = mem_create_simple_file(mem, removable);
*memory = mem;
return ret;
@@ -656,14 +677,9 @@ static int remove_memory_block(unsigned long node_id,
unregister_mem_sect_under_nodes(mem, __section_nr(section));
mem->section_count--;
- if (mem->section_count == 0) {
- mem_remove_simple_file(mem, phys_index);
- mem_remove_simple_file(mem, end_phys_index);
- mem_remove_simple_file(mem, state);
- mem_remove_simple_file(mem, phys_device);
- mem_remove_simple_file(mem, removable);
+ if (mem->section_count == 0)
unregister_memory(mem);
- } else
+ else
kobject_put(&mem->dev.kobj);
mutex_unlock(&mem_sysfs_mutex);
@@ -679,27 +695,35 @@ int unregister_memory_section(struct mem_section *section)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-/*
- * offline one memory block. If the memory block has been offlined, do nothing.
- */
-int offline_memory_block(struct memory_block *mem)
-{
- int ret = 0;
-
- mutex_lock(&mem->state_mutex);
- if (mem->state != MEM_OFFLINE)
- ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
- mutex_unlock(&mem->state_mutex);
-
- return ret;
-}
-
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
return mem->state == MEM_OFFLINE;
}
+static struct attribute *memory_root_attrs[] = {
+#ifdef CONFIG_ARCH_MEMORY_PROBE
+ &dev_attr_probe.attr,
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+ &dev_attr_soft_offline_page.attr,
+ &dev_attr_hard_offline_page.attr,
+#endif
+
+ &dev_attr_block_size_bytes.attr,
+ NULL
+};
+
+static struct attribute_group memory_root_attr_group = {
+ .attrs = memory_root_attrs,
+};
+
+static const struct attribute_group *memory_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
/*
* Initialize the sysfs support for memory devices...
*/
@@ -711,7 +735,7 @@ int __init memory_dev_init(void)
unsigned long block_sz;
struct memory_block *mem = NULL;
- ret = subsys_system_register(&memory_subsys, NULL);
+ ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
goto out;
@@ -734,15 +758,6 @@ int __init memory_dev_init(void)
ret = err;
}
- err = memory_probe_init();
- if (!ret)
- ret = err;
- err = memory_fail_init();
- if (!ret)
- ret = err;
- err = block_size_init();
- if (!ret)
- ret = err;
out:
if (ret)
printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index 67a274e86727..5fb74b43848e 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -48,6 +48,25 @@ int pinctrl_bind_pins(struct device *dev)
goto cleanup_get;
}
+#ifdef CONFIG_PM
+ /*
+ * If power management is enabled, we also look for the optional
+ * sleep and idle pin states, with semantics as defined in
+ * <linux/pinctrl/pinctrl-state.h>
+ */
+ dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(dev->pins->sleep_state))
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no sleep pinctrl state\n");
+
+ dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(dev->pins->idle_state))
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no idle pinctrl state\n");
+#endif
+
return 0;
/*
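
With CONFIG_PM set, the driver core now also looks up the optional "sleep" and "idle" pinctrl states next to "default". A driver handling its own pin control can do the equivalent with the existing lookup/select API; the following is only a sketch (foo_* names invented) and assumes nothing beyond pinctrl_lookup_state()/pinctrl_select_state():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl-state.h>

struct foo_priv {
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_sleep;
};

static int foo_pins_init(struct device *dev, struct foo_priv *priv)
{
        priv->pinctrl = devm_pinctrl_get(dev);
        if (IS_ERR(priv->pinctrl))
                return PTR_ERR(priv->pinctrl);

        /* a missing sleep state is not an error, mirroring the core */
        priv->pins_sleep = pinctrl_lookup_state(priv->pinctrl,
                                                PINCTRL_STATE_SLEEP);
        return 0;
}

static int foo_suspend(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        if (!IS_ERR(priv->pins_sleep))
                pinctrl_select_state(priv->pinctrl, priv->pins_sleep);
        return 0;
}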
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9eda84246ffd..15789875128e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -29,9 +29,6 @@
/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);
-#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
- driver))
-
struct device platform_bus = {
.init_name = "platform",
};
@@ -523,11 +520,13 @@ static void platform_drv_shutdown(struct device *_dev)
}
/**
- * platform_driver_register - register a driver for platform-level devices
+ * __platform_driver_register - register a driver for platform-level devices
* @drv: platform driver structure
*/
-int platform_driver_register(struct platform_driver *drv)
+int __platform_driver_register(struct platform_driver *drv,
+ struct module *owner)
{
+ drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
if (drv->probe)
drv->driver.probe = platform_drv_probe;
@@ -538,7 +537,7 @@ int platform_driver_register(struct platform_driver *drv)
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(platform_driver_register);
+EXPORT_SYMBOL_GPL(__platform_driver_register);
/**
* platform_driver_unregister - unregister a driver for platform-level devices
@@ -888,7 +887,6 @@ int platform_pm_restore(struct device *dev)
static const struct dev_pm_ops platform_dev_pm_ops = {
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
};
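
__platform_driver_register() now records the owning module so drv->driver.owner no longer needs to be set by hand. The companion header change is not in this diffstat, but the old entry point is expected to become a thin wrapper macro along these lines, so existing platform_driver_register(&foo_driver) and module_platform_driver() users keep compiling unchanged:

/* include/linux/platform_device.h (sketch of the companion change) */
#define platform_driver_register(drv) \
        __platform_driver_register(drv, THIS_MODULE)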
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7072404c8b6d..bfb8955c406c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2143,7 +2143,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
- genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index bfd898b8988e..5ee030a864f9 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -12,29 +12,6 @@
#ifdef CONFIG_PM_RUNTIME
/**
- * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
- * @dev: Device to handle.
- *
- * If PM operations are defined for the @dev's driver and they include
- * ->runtime_idle(), execute it and return its error code, if nonzero.
- * Otherwise, execute pm_runtime_suspend() for the device and return 0.
- */
-int pm_generic_runtime_idle(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm && pm->runtime_idle) {
- int ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
- }
-
- pm_runtime_suspend(dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
-
-/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
* @dev: Device to suspend.
*
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index f0077cb8e249..c8ec186303db 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -648,14 +648,14 @@ int opp_init_cpufreq_table(struct device *dev,
list_for_each_entry(opp, &dev_opp->opp_list, node) {
if (opp->available) {
- freq_table[i].index = i;
+ freq_table[i].driver_data = i;
freq_table[i].frequency = opp->rate / 1000;
i++;
}
}
mutex_unlock(&dev_opp_list_lock);
- freq_table[i].index = i;
+ freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
*table = &freq_table[0];
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 71671c42ef45..5c1361a9e5dd 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -42,6 +42,7 @@
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
+#include <trace/events/power.h>
#include "power.h"
@@ -305,6 +306,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev);
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
if (!ret) {
req->dev = dev;
req->type = type;
@@ -349,6 +351,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -EINVAL;
}
+ trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
+ new_value);
if (curr_value != new_value)
ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
@@ -398,6 +402,8 @@ static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
+ trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
+ PM_QOS_DEFAULT_VALUE);
ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret;
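
The three new tracepoints fire from the regular device PM QoS entry points, so drivers get the events without any changes. As a hedged illustration of the request life cycle that now shows up in the trace (foo_* names and the latency values are invented; DEV_PM_QOS_LATENCY is assumed to be the latency request type at the time of this series):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_qos_req;

static int foo_constrain_latency(struct device *dev)
{
        int ret;

        /* traced as dev_pm_qos_add_request */
        ret = dev_pm_qos_add_request(dev, &foo_qos_req,
                                     DEV_PM_QOS_LATENCY, 100);
        if (ret < 0)
                return ret;

        /* traced as dev_pm_qos_update_request */
        dev_pm_qos_update_request(&foo_qos_req, 50);

        /* traced as dev_pm_qos_remove_request */
        return dev_pm_qos_remove_request(&foo_qos_req);
}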
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ef13ad08afb2..268a35097578 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -293,11 +293,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
- if (dev->power.no_callbacks) {
- /* Assume ->runtime_idle() callback would have suspended. */
- retval = rpm_suspend(dev, rpmflags);
+ if (dev->power.no_callbacks)
goto out;
- }
/* Carry out an asynchronous or a synchronous idle notification. */
if (rpmflags & RPM_ASYNC) {
@@ -306,7 +303,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- goto out;
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
+ return 0;
}
dev->power.idle_notification = true;
@@ -326,14 +324,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
callback = dev->driver->pm->runtime_idle;
if (callback)
- __rpm_callback(callback, dev);
+ retval = __rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
- return retval;
+ return retval ? retval : rpm_suspend(dev, rpmflags);
}
/**
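
Together with the removal of pm_generic_runtime_idle() earlier in this series, this changes the ->runtime_idle() contract: rpm_idle() now calls rpm_suspend() itself whenever the callback returns 0, so callbacks simply return 0 to allow the suspend or an error such as -EBUSY to veto it, instead of calling pm_runtime_suspend() themselves. An illustrative callback under the new contract (foo_* names invented):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_idle(struct device *dev)
{
        /* 0 lets the core proceed to rpm_suspend(); a negative
         * value such as -EBUSY blocks the suspend instead.
         */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(NULL, NULL, foo_runtime_idle)
};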
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 79715e7fa43e..2d56f4113ae7 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -659,7 +659,7 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);
-static void print_active_wakeup_sources(void)
+void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
int active = 0;
@@ -683,6 +683,7 @@ static void print_active_wakeup_sources(void)
last_activity_ws->name);
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
@@ -707,8 +708,10 @@ bool pm_wakeup_pending(void)
}
spin_unlock_irqrestore(&events_lock, flags);
- if (ret)
- print_active_wakeup_sources();
+ if (ret) {
+ pr_info("PM: Wakeup pending, aborting suspend\n");
+ pm_print_active_wakeup_sources();
+ }
return ret;
}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index c130536e0ab0..29c83160ca29 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -52,6 +52,7 @@ struct regmap_async {
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
+ unsigned long spinlock_flags;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -148,6 +149,7 @@ struct regcache_ops {
int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+ int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
};
bool regmap_writeable(struct regmap *map, unsigned int reg);
@@ -174,6 +176,14 @@ struct regmap_range_node {
unsigned int window_len;
};
+struct regmap_field {
+ struct regmap *regmap;
+ unsigned int mask;
+ /* lsb */
+ unsigned int shift;
+ unsigned int reg;
+};
+
#ifdef CONFIG_DEBUG_FS
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 02f490bad30f..5c1435c4e210 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -304,6 +304,48 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
return 0;
}
+static struct regcache_rbtree_node *
+regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+{
+ struct regcache_rbtree_node *rbnode;
+ const struct regmap_range *range;
+ int i;
+
+ rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+ if (!rbnode)
+ return NULL;
+
+ /* If there is a read table then use it to guess at an allocation */
+ if (map->rd_table) {
+ for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
+ if (regmap_reg_in_range(reg,
+ &map->rd_table->yes_ranges[i]))
+ break;
+ }
+
+ if (i != map->rd_table->n_yes_ranges) {
+ range = &map->rd_table->yes_ranges[i];
+ rbnode->blklen = range->range_max - range->range_min
+ + 1;
+ rbnode->base_reg = range->range_min;
+ }
+ }
+
+ if (!rbnode->blklen) {
+ rbnode->blklen = sizeof(*rbnode);
+ rbnode->base_reg = reg;
+ }
+
+ rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
+ GFP_KERNEL);
+ if (!rbnode->block) {
+ kfree(rbnode);
+ return NULL;
+ }
+
+ return rbnode;
+}
+
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
@@ -354,23 +396,15 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
return 0;
}
}
- /* we did not manage to find a place to insert it in an existing
- * block so create a new rbnode with a single register in its block.
- * This block will get populated further if any other adjacent
- * registers get modified in the future.
+
+ /* We did not manage to find a place to insert it in
+ * an existing block so create a new rbnode.
*/
- rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+ rbnode = regcache_rbtree_node_alloc(map, reg);
if (!rbnode)
return -ENOMEM;
- rbnode->blklen = sizeof(*rbnode);
- rbnode->base_reg = reg;
- rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
- GFP_KERNEL);
- if (!rbnode->block) {
- kfree(rbnode);
- return -ENOMEM;
- }
- regcache_rbtree_set_register(map, rbnode, 0, value);
+ regcache_rbtree_set_register(map, rbnode,
+ reg - rbnode->base_reg, value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 507ee2da0f6e..e69102696533 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -250,6 +250,38 @@ int regcache_write(struct regmap *map,
return 0;
}
+static int regcache_default_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ unsigned int reg;
+
+ for (reg = min; reg <= max; reg++) {
+ unsigned int val;
+ int ret;
+
+ if (regmap_volatile(map, reg))
+ continue;
+
+ ret = regcache_read(map, reg, &val);
+ if (ret)
+ return ret;
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, reg);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, reg, val);
+ map->cache_bypass = 0;
+ if (ret)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
+ }
+
+ return 0;
+}
+
/**
* regcache_sync: Sync the register cache with the hardware.
*
@@ -268,7 +300,7 @@ int regcache_sync(struct regmap *map)
const char *name;
unsigned int bypass;
- BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+ BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
@@ -297,7 +329,10 @@ int regcache_sync(struct regmap *map)
}
map->cache_bypass = 0;
- ret = map->cache_ops->sync(map, 0, map->max_register);
+ if (map->cache_ops->sync)
+ ret = map->cache_ops->sync(map, 0, map->max_register);
+ else
+ ret = regcache_default_sync(map, 0, map->max_register);
if (ret == 0)
map->cache_dirty = false;
@@ -331,7 +366,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
const char *name;
unsigned int bypass;
- BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+ BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@@ -346,7 +381,10 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
if (!map->cache_dirty)
goto out;
- ret = map->cache_ops->sync(map, min, max);
+ if (map->cache_ops->sync)
+ ret = map->cache_ops->sync(map, min, max);
+ else
+ ret = regcache_default_sync(map, min, max);
out:
trace_regcache_sync(map->dev, name, "stop region");
@@ -359,6 +397,43 @@ out:
EXPORT_SYMBOL_GPL(regcache_sync_region);
/**
+ * regcache_drop_region: Discard part of the register cache
+ *
+ * @map: map to operate on
+ * @min: first register to discard
+ * @max: last register to discard
+ *
+ * Discard part of the register cache.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ unsigned int reg;
+ int ret = 0;
+
+ if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ trace_regcache_drop_region(map->dev, min, max);
+
+ if (map->cache_present)
+ for (reg = min; reg < max + 1; reg++)
+ clear_bit(reg, map->cache_present);
+
+ if (map->cache_ops && map->cache_ops->drop)
+ ret = map->cache_ops->drop(map, min, max);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_drop_region);
+
+/**
* regcache_cache_only: Put a register map into cache only mode
*
* @map: map to configure
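
regcache_drop_region() gives drivers a way to invalidate part of the register cache, for example after a hardware block has been reset behind regmap's back. A small hedged example of a caller (the 0x30-0x3f range and the foo_* name are made up):

#include <linux/device.h>
#include <linux/regmap.h>

static int foo_after_block_reset(struct device *dev, struct regmap *map)
{
        int ret;

        /* forget the cached values so the next read or sync hits the device */
        ret = regcache_drop_region(map, 0x30, 0x3f);
        if (ret)
                dev_warn(dev, "failed to drop cache region: %d\n", ret);
        return ret;
}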
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 975719bc3450..53495753fbdb 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -84,6 +84,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
unsigned int fpos_offset;
unsigned int reg_offset;
+ /* Suppress the cache if we're using a subrange */
+ if (from)
+ return from;
+
/*
* If we don't have a cache build one so we don't have to do a
* linear scan each time.
@@ -145,7 +149,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
reg_offset = fpos_offset / map->debugfs_tot_len;
*pos = c->min + (reg_offset * map->debugfs_tot_len);
mutex_unlock(&map->cache_lock);
- return c->base_reg + reg_offset;
+ return c->base_reg + (reg_offset * map->reg_stride);
}
*pos = c->max;
@@ -281,7 +285,7 @@ static ssize_t regmap_map_write_file(struct file *file,
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
ret = regmap_write(map, reg, value);
if (ret < 0)
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index a941dcfe7590..95920583e31e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -65,9 +65,8 @@ bool regmap_reg_in_ranges(unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
-static bool _regmap_check_range_table(struct regmap *map,
- unsigned int reg,
- const struct regmap_access_table *table)
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+ const struct regmap_access_table *table)
{
/* Check "no ranges" first */
if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
@@ -80,6 +79,7 @@ static bool _regmap_check_range_table(struct regmap *map,
return regmap_reg_in_ranges(reg, table->yes_ranges,
table->n_yes_ranges);
}
+EXPORT_SYMBOL_GPL(regmap_check_range_table);
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
@@ -90,7 +90,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
return map->writeable_reg(map->dev, reg);
if (map->wr_table)
- return _regmap_check_range_table(map, reg, map->wr_table);
+ return regmap_check_range_table(map, reg, map->wr_table);
return true;
}
@@ -107,7 +107,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
return map->readable_reg(map->dev, reg);
if (map->rd_table)
- return _regmap_check_range_table(map, reg, map->rd_table);
+ return regmap_check_range_table(map, reg, map->rd_table);
return true;
}
@@ -121,9 +121,12 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
return map->volatile_reg(map->dev, reg);
if (map->volatile_table)
- return _regmap_check_range_table(map, reg, map->volatile_table);
+ return regmap_check_range_table(map, reg, map->volatile_table);
- return true;
+ if (map->cache_ops)
+ return false;
+ else
+ return true;
}
bool regmap_precious(struct regmap *map, unsigned int reg)
@@ -135,7 +138,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return map->precious_reg(map->dev, reg);
if (map->precious_table)
- return _regmap_check_range_table(map, reg, map->precious_table);
+ return regmap_check_range_table(map, reg, map->precious_table);
return false;
}
@@ -302,13 +305,16 @@ static void regmap_unlock_mutex(void *__map)
static void regmap_lock_spinlock(void *__map)
{
struct regmap *map = __map;
- spin_lock(&map->spinlock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&map->spinlock, flags);
+ map->spinlock_flags = flags;
}
static void regmap_unlock_spinlock(void *__map)
{
struct regmap *map = __map;
- spin_unlock(&map->spinlock);
+ spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
static void dev_get_regmap_release(struct device *dev, void *res)
@@ -801,6 +807,95 @@ struct regmap *devm_regmap_init(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_init);
+static void regmap_field_init(struct regmap_field *rm_field,
+ struct regmap *regmap, struct reg_field reg_field)
+{
+ int field_bits = reg_field.msb - reg_field.lsb + 1;
+ rm_field->regmap = regmap;
+ rm_field->reg = reg_field.reg;
+ rm_field->shift = reg_field.lsb;
+ rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+}
+
+/**
+ * devm_regmap_field_alloc(): Allocate and initialise a register field
+ * in a register map.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field will be automatically freed
+ * by the device management code.
+ */
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+ struct regmap *regmap, struct reg_field reg_field)
+{
+ struct regmap_field *rm_field = devm_kzalloc(dev,
+ sizeof(*rm_field), GFP_KERNEL);
+ if (!rm_field)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_field_init(rm_field, regmap, reg_field);
+
+ return rm_field;
+
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
+
+/**
+ * devm_regmap_field_free(): Free register field allocated using
+ * devm_regmap_field_alloc. Usually drivers need not call this function,
+ * as the memory allocated via devm will be freed as per device-driver
+ * life-cycle.
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ */
+void devm_regmap_field_free(struct device *dev,
+ struct regmap_field *field)
+{
+ devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_free);
+
+/**
+ * regmap_field_alloc(): Allocate and initialise a register field
+ * in a register map.
+ *
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field should be freed by the
+ * user once they are finished working with it using regmap_field_free().
+ */
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+ struct reg_field reg_field)
+{
+ struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
+
+ if (!rm_field)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_field_init(rm_field, regmap, reg_field);
+
+ return rm_field;
+}
+EXPORT_SYMBOL_GPL(regmap_field_alloc);
+
+/**
+ * regmap_field_free(): Free register field allocated using regmap_field_alloc
+ *
+ * @field: regmap field which should be freed.
+ */
+void regmap_field_free(struct regmap_field *field)
+{
+ kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_free);
+
/**
* regmap_reinit_cache(): Reinitialise the current register cache
*
@@ -1249,6 +1344,22 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
+/**
+ * regmap_field_write(): Write a value to a single register field
+ *
+ * @field: Register field to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_write(struct regmap_field *field, unsigned int val)
+{
+ return regmap_update_bits(field->regmap, field->reg,
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_write);
+
/*
* regmap_bulk_write(): Write multiple registers to the device
*
@@ -1532,6 +1643,31 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
+ * regmap_field_read(): Read a value from a single register field
+ *
+ * @field: Register field to read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_read(struct regmap_field *field, unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+ ret = regmap_read(field->regmap, field->reg, &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_field_read);
+
+/**
* regmap_bulk_read(): Read multiple registers from the device
*
* @map: Register map to write to
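
The regmap_field helpers added above wrap the shift and mask bookkeeping for a bit field inside a register. The sketch below assumes the companion include/linux/regmap.h change that introduces struct reg_field and the REG_FIELD() initializer; register 0x20, bits 3..5 and the foo_* names are made up:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* bits [5:3] of register 0x20 (illustrative) */
static const struct reg_field foo_mode_field = REG_FIELD(0x20, 3, 5);

static int foo_set_and_check_mode(struct device *dev, struct regmap *map,
                                  unsigned int mode)
{
        struct regmap_field *field;
        unsigned int cur;
        int ret;

        field = devm_regmap_field_alloc(dev, map, foo_mode_field);
        if (IS_ERR(field))
                return PTR_ERR(field);

        /* the value is shifted into place and masked by the core */
        ret = regmap_field_write(field, mode);
        if (ret)
                return ret;

        /* read back the field, already shifted down and masked */
        return regmap_field_read(field, &cur);
}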