Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/class.c                      2
-rw-r--r--  drivers/base/core.c                       2
-rw-r--r--  drivers/base/dd.c                         2
-rw-r--r--  drivers/base/devres.c                    19
-rw-r--r--  drivers/base/dma-contiguous.c             4
-rw-r--r--  drivers/base/pinctrl.c                   15
-rw-r--r--  drivers/base/platform-msi.c               6
-rw-r--r--  drivers/base/platform.c                  80
-rw-r--r--  drivers/base/power/Makefile               2
-rw-r--r--  drivers/base/power/clock_ops.c           12
-rw-r--r--  drivers/base/power/domain.c               4
-rw-r--r--  drivers/base/power/generic_ops.c         23
-rw-r--r--  drivers/base/power/opp/core.c (renamed from drivers/base/power/opp.c)  409
-rw-r--r--  drivers/base/power/opp/Makefile           2
-rw-r--r--  drivers/base/power/opp/cpu.c            270
-rw-r--r--  drivers/base/power/opp/opp.h            146
-rw-r--r--  drivers/base/power/wakeirq.c              6
-rw-r--r--  drivers/base/power/wakeup.c              16
-rw-r--r--  drivers/base/property.c                 120
-rw-r--r--  drivers/base/regmap/internal.h            9
-rw-r--r--  drivers/base/regmap/regcache-lzo.c        4
-rw-r--r--  drivers/base/regmap/regcache.c           24
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c     23
-rw-r--r--  drivers/base/regmap/regmap-irq.c         43
-rw-r--r--  drivers/base/regmap/regmap.c             41
-rw-r--r--  drivers/base/soc.c                       21
26 files changed, 835 insertions, 470 deletions
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 6e810881e48b..71059e32bebc 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
*
* Note, you will need to drop the reference with put_device() after use.
*
- * @fn is allowed to do anything including calling back into class
+ * @match is allowed to do anything including calling back into class
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
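
Usage note (not part of the patch): the @match callback documented above is supplied by the caller of class_find_device(), and the returned device carries a reference that must be dropped. A minimal sketch with a hypothetical match-by-name helper:

#include <linux/device.h>
#include <linux/string.h>

/* Hypothetical callback: match a device by its sysfs name. */
static int foo_match_name(struct device *dev, const void *data)
{
        return sysfs_streq(dev_name(dev), data);
}

static bool foo_name_exists(struct class *cls, const char *name)
{
        struct device *dev;

        dev = class_find_device(cls, NULL, name, foo_match_name);
        if (!dev)
                return false;

        put_device(dev);        /* class_find_device() took a reference */
        return true;
}
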
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 334ec7ef1960..b7d56c5ea3c6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1066,7 +1066,7 @@ int device_add(struct device *dev)
dev->kobj.parent = kobj;
/* use parent numa_node */
- if (parent)
+ if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
set_dev_node(dev, dev_to_node(parent));
/* first, register with generic layer. */
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index be0eb4639128..a641cf3ccad6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -322,6 +322,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
+ pinctrl_init_done(dev);
+
if (dev->pm_domain && dev->pm_domain->sync)
dev->pm_domain->sync(dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 875464690117..8fc654f0807b 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -82,12 +82,12 @@ static struct devres_group * node_to_group(struct devres_node *node)
}
static __always_inline struct devres * alloc_dr(dr_release_t release,
- size_t size, gfp_t gfp)
+ size_t size, gfp_t gfp, int nid)
{
size_t tot_size = sizeof(struct devres) + size;
struct devres *dr;
- dr = kmalloc_track_caller(tot_size, gfp);
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
@@ -106,24 +106,25 @@ static void add_dr(struct device *dev, struct devres_node *node)
}
#ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
const char *name)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
-EXPORT_SYMBOL_GPL(__devres_alloc);
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
* devres_alloc - Allocate device resource data
* @release: Release function devres will be associated with
* @size: Allocation size
* @gfp: Allocation flags
+ * @nid: NUMA node
*
* Allocate devres of @size bytes. The allocated area is zeroed, then
* associated with @release. The returned pointer can be passed to
@@ -132,16 +133,16 @@ EXPORT_SYMBOL_GPL(__devres_alloc);
* RETURNS:
* Pointer to allocated devres on success, NULL on failure.
*/
-void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
return dr->data;
}
-EXPORT_SYMBOL_GPL(devres_alloc);
+EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
/**
@@ -776,7 +777,7 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kmalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
if (unlikely(!dr))
return NULL;
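
Usage sketch for the NUMA-aware allocator introduced above (the foo_* names are hypothetical and not part of the patch): devres_alloc_node() lets a driver keep its managed-resource bookkeeping on the device's own node, mirroring what devm_kmalloc() now does internally.

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical driver-private resource tracked via devres. */
struct foo_dma_state {
        void *bounce_buf;
};

static void foo_dma_release(struct device *dev, void *res)
{
        struct foo_dma_state *st = res;

        kfree(st->bounce_buf);
}

static int foo_setup_dma(struct device *dev)
{
        struct foo_dma_state *st;

        /* Keep the devres bookkeeping on the device's own NUMA node. */
        st = devres_alloc_node(foo_dma_release, sizeof(*st), GFP_KERNEL,
                               dev_to_node(dev));
        if (!st)
                return -ENOMEM;

        st->bounce_buf = kmalloc_node(512, GFP_KERNEL, dev_to_node(dev));
        if (!st->bounce_buf) {
                devres_free(st);
                return -ENOMEM;
        }

        devres_add(dev, st);
        return 0;
}
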
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 950fff9ce453..e167a1e1bccb 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -46,7 +46,7 @@ struct cma *dma_contiguous_default_area;
* Users, who want to set the size of global CMA area for their system
* should use cma= kernel parameter.
*/
-static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;
@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* global one. Requires architecture specific dev_get_cma_area() helper
* function.
*/
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int align)
{
if (align > CONFIG_CMA_ALIGNMENT)
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index 5fb74b43848e..076297592754 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -42,9 +42,20 @@ int pinctrl_bind_pins(struct device *dev)
goto cleanup_get;
}
- ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+ dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_INIT);
+ if (IS_ERR(dev->pins->init_state)) {
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no init pinctrl state\n");
+
+ ret = pinctrl_select_state(dev->pins->p,
+ dev->pins->default_state);
+ } else {
+ ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
+ }
+
if (ret) {
- dev_dbg(dev, "failed to activate default pinctrl state\n");
+ dev_dbg(dev, "failed to activate initial pinctrl state\n");
goto cleanup_get;
}
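
The hunk above uses the ordinary pinctrl consumer API to select an optional "init" state before probe; the new state itself needs no driver code. For reference, a hedged driver-side sketch using the same calls for the long-standing "sleep" state (foo_* is hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int foo_apply_sleep_pins(struct device *dev)
{
        struct pinctrl *p;
        struct pinctrl_state *sleep;
        int ret;

        p = devm_pinctrl_get(dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /* A missing state is legal, just like the optional "init" state. */
        sleep = pinctrl_lookup_state(p, PINCTRL_STATE_SLEEP);
        if (IS_ERR(sleep))
                return 0;

        ret = pinctrl_select_state(p, sleep);
        if (ret)
                dev_dbg(dev, "failed to select sleep pinctrl state\n");

        return ret;
}
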
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 134483daac25..5df4575b5ba7 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -152,7 +152,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
- * @np: Optional device-tree node of the interrupt controller
+ * @fwnode: Optional fwnode of the interrupt controller
* @info: MSI domain info
* @parent: Parent irq domain
*
@@ -162,7 +162,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec,
* Returns:
* A domain pointer or NULL in case of failure.
*/
-struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent)
{
@@ -173,7 +173,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
platform_msi_update_chip_ops(info);
- domain = msi_create_irq_domain(np, info, parent);
+ domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f80aaaf9f610..1dd6d3bf1098 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -513,7 +513,7 @@ static int platform_drv_probe(struct device *_dev)
return ret;
ret = dev_pm_domain_attach(_dev, true);
- if (ret != -EPROBE_DEFER) {
+ if (ret != -EPROBE_DEFER && drv->probe) {
ret = drv->probe(dev);
if (ret)
dev_pm_domain_detach(_dev, true);
@@ -536,9 +536,10 @@ static int platform_drv_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- int ret;
+ int ret = 0;
- ret = drv->remove(dev);
+ if (drv->remove)
+ ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
return ret;
@@ -549,7 +550,8 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- drv->shutdown(dev);
+ if (drv->shutdown)
+ drv->shutdown(dev);
dev_pm_domain_detach(_dev, true);
}
@@ -563,12 +565,9 @@ int __platform_driver_register(struct platform_driver *drv,
{
drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
- if (drv->probe)
- drv->driver.probe = platform_drv_probe;
- if (drv->remove)
- drv->driver.remove = platform_drv_remove;
- if (drv->shutdown)
- drv->driver.shutdown = platform_drv_shutdown;
+ drv->driver.probe = platform_drv_probe;
+ drv->driver.remove = platform_drv_remove;
+ drv->driver.shutdown = platform_drv_shutdown;
return driver_register(&drv->driver);
}
@@ -711,6 +710,67 @@ err_out:
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);
+/**
+ * __platform_register_drivers - register an array of platform drivers
+ * @drivers: an array of drivers to register
+ * @count: the number of drivers to register
+ * @owner: module owning the drivers
+ *
+ * Registers platform drivers specified by an array. On failure to register a
+ * driver, all previously registered drivers will be unregistered. Callers of
+ * this API should use platform_unregister_drivers() to unregister drivers in
+ * the reverse order.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int __platform_register_drivers(struct platform_driver * const *drivers,
+ unsigned int count, struct module *owner)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < count; i++) {
+ pr_debug("registering platform driver %ps\n", drivers[i]);
+
+ err = __platform_driver_register(drivers[i], owner);
+ if (err < 0) {
+ pr_err("failed to register platform driver %ps: %d\n",
+ drivers[i], err);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ while (i--) {
+ pr_debug("unregistering platform driver %ps\n", drivers[i]);
+ platform_driver_unregister(drivers[i]);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__platform_register_drivers);
+
+/**
+ * platform_unregister_drivers - unregister an array of platform drivers
+ * @drivers: an array of drivers to unregister
+ * @count: the number of drivers to unregister
+ *
+ * Unregisters platform drivers specified by an array. This is typically used
+ * to complement an earlier call to platform_register_drivers(). Drivers are
+ * unregistered in the reverse order in which they were registered.
+ */
+void platform_unregister_drivers(struct platform_driver * const *drivers,
+ unsigned int count)
+{
+ while (count--) {
+ pr_debug("unregistering platform driver %ps\n", drivers[count]);
+ platform_driver_unregister(drivers[count]);
+ }
+}
+EXPORT_SYMBOL_GPL(platform_unregister_drivers);
+
/* modalias support enables more hands-off userspace setup:
* (a) environment variable lets new-style hotplug events work once system is
* fully running: "modprobe $MODALIAS"
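
A minimal sketch of the new bulk-registration helpers, calling __platform_register_drivers() directly (driver names are hypothetical; with probe/remove/shutdown now optional, such skeletal drivers are accepted):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver foo_hw_driver = {
        .driver = { .name = "foo-hw" },
};

static struct platform_driver foo_ctl_driver = {
        .driver = { .name = "foo-ctl" },
};

static struct platform_driver * const foo_drivers[] = {
        &foo_hw_driver,
        &foo_ctl_driver,
};

static int __init foo_init(void)
{
        /* Registers everything, rolling back automatically on failure. */
        return __platform_register_drivers(foo_drivers, ARRAY_SIZE(foo_drivers),
                                           THIS_MODULE);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
        /* Unregisters in reverse order of registration. */
        platform_unregister_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
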
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index f94a6ccfe787..5998c53280f5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 652b5a367c1f..60ee5591ee8f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -17,7 +17,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_CLK
enum pce_status {
PCE_STATUS_NONE = 0,
@@ -93,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
return -ENOMEM;
}
} else {
- if (IS_ERR(clk) || !__clk_get(clk)) {
+ if (IS_ERR(clk)) {
kfree(ce);
return -ENOENT;
}
@@ -127,7 +127,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
* @clk: Clock pointer
*
* Add the clock to the list of clocks used for the power management of @dev.
- * It will increment refcount on clock pointer, use clk_put() on it when done.
+ * The power-management code will take control of the clock reference, so
+ * callers should not call clk_put() on @clk after this function successfully
+ * returned.
*/
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
@@ -404,7 +406,7 @@ int pm_clk_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#else /* !CONFIG_PM */
+#else /* !CONFIG_PM_CLK */
/**
* enable_clock - Enable a device clock.
@@ -484,7 +486,7 @@ static int pm_clk_notify(struct notifier_block *nb,
return 0;
}
-#endif /* !CONFIG_PM */
+#endif /* !CONFIG_PM_CLK */
/**
* pm_clk_add_notifier - Add bus type notifier for power management clocks.
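
Following the updated pm_clk_add_clk() kerneldoc, the caller hands its clock reference over on success and only cleans up on failure. A sketch under the assumption that pm_clk_create() has already been called for the device (typically by its bus or PM domain code); the "fck" con_id is hypothetical:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_clock.h>

static int foo_attach_clock(struct device *dev)
{
        struct clk *clk;
        int ret;

        clk = clk_get(dev, "fck");              /* hypothetical con_id */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /*
         * On success pm_clk_add_clk() owns the reference; clk_put() is
         * only needed on the error path.
         */
        ret = pm_clk_add_clk(dev, clk);
        if (ret)
                clk_put(clk);

        return ret;
}
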
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 8ad59f3e6f80..b80379012840 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1263,6 +1263,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
return ret;
}
+EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
@@ -1313,6 +1314,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
return ret;
}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
@@ -1363,6 +1365,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
kfree(link);
return ret;
}
+EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
@@ -1410,6 +1413,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
/* Default device callbacks for generic PM domains. */
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 96a92db83cad..07c3c4a9522d 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -9,6 +9,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#ifdef CONFIG_PM
/**
@@ -296,11 +297,27 @@ void pm_generic_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+}
+/**
+ * pm_complete_with_resume_check - Complete a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition and
+ * optionally schedule a runtime resume of the device if the system resume in
+ * progress has been initiated by the platform firmware and the device had its
+ * power.direct_complete flag set.
+ */
+void pm_complete_with_resume_check(struct device *dev)
+{
+ pm_generic_complete(dev);
/*
- * Let runtime PM try to suspend devices that haven't been in use before
- * going into the system-wide sleep state we're resuming from.
+ * If the device had been runtime-suspended before the system went into
+ * the sleep state it is going out of and it has never been resumed till
+ * now, resume it in case the firmware powered it up.
*/
- pm_request_idle(dev);
+ if (dev->power.direct_complete && pm_resume_via_firmware())
+ pm_request_resume(dev);
}
+EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
#endif /* CONFIG_PM_SLEEP */
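
A sketch of how a driver might plug the new helper into its dev_pm_ops (assuming pm_complete_with_resume_check() is declared for CONFIG_PM_SLEEP builds; the foo_* callbacks are hypothetical stubs):

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
        /* ... put the hypothetical device into a low-power state ... */
        return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
        /* ... bring it back up ... */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
#ifdef CONFIG_PM_SLEEP
        /*
         * Let the core schedule a runtime resume if the firmware may have
         * powered the device up behind our back during system resume.
         */
        .complete = pm_complete_with_resume_check,
#endif
};
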
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
new file mode 100644
index 000000000000..33c1e18c41a4
--- /dev/null
+++ b/drivers/base/power/opp/Makefile
@@ -0,0 +1,2 @@
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+obj-y += core.o cpu.o
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp/core.c
index 7ae7cd990fbf..b8e76f75073b 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp/core.c
@@ -11,131 +11,16 @@
* published by the Free Software Foundation.
*/
-#include <linux/cpu.h>
-#include <linux/kernel.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
-/*
- * Internal data structure organization with the OPP layer library is as
- * follows:
- * dev_opp_list (root)
- * |- device 1 (represents voltage domain 1)
- * | |- opp 1 (availability, freq, voltage)
- * | |- opp 2 ..
- * ... ...
- * | `- opp n ..
- * |- device 2 (represents the next voltage domain)
- * ...
- * `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
- * is represented by the opp structure.
- */
-
-/**
- * struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
- * of boot. It is expected only an optimal set of OPPs are
- * added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
- * modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
- * IMPORTANT: the opp nodes should be maintained in increasing
- * order.
- * @dynamic: not-created from static DT entries.
- * @available: true/false - marks if this OPP as available or not
- * @turbo: true if turbo (boost) OPP
- * @rate: Frequency in hertz
- * @u_volt: Target voltage in microvolts corresponding to this OPP
- * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
- * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
- * @u_amp: Maximum current drawn by the device in microamperes
- * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
- * frequency from any other OPP's frequency.
- * @dev_opp: points back to the device_opp struct this opp belongs to
- * @rcu_head: RCU callback head used for deferred freeing
- * @np: OPP's device node.
- *
- * This structure stores the OPP information for a given device.
- */
-struct dev_pm_opp {
- struct list_head node;
-
- bool available;
- bool dynamic;
- bool turbo;
- unsigned long rate;
-
- unsigned long u_volt;
- unsigned long u_volt_min;
- unsigned long u_volt_max;
- unsigned long u_amp;
- unsigned long clock_latency_ns;
-
- struct device_opp *dev_opp;
- struct rcu_head rcu_head;
-
- struct device_node *np;
-};
-
-/**
- * struct device_list_opp - devices managed by 'struct device_opp'
- * @node: list node
- * @dev: device to which the struct object belongs
- * @rcu_head: RCU callback head used for deferred freeing
- *
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
- */
-struct device_list_opp {
- struct list_head node;
- const struct device *dev;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
- * have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
- * @srcu_head: notifier head to notify the OPP availability changes.
- * @rcu_head: RCU callback head used for deferred freeing
- * @dev_list: list of devices that share these OPPs
- * @opp_list: list of opps
- * @np: struct device_node pointer for opp's DT node.
- * @shared_opp: OPP is shared between multiple devices.
- *
- * This is an internal data structure maintaining the link to opps attached to
- * a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library.
- *
- * Because the opp structures can be used from both rcu and srcu readers, we
- * need to wait for the grace period of both of them before freeing any
- * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
- */
-struct device_opp {
- struct list_head node;
-
- struct srcu_notifier_head srcu_head;
- struct rcu_head rcu_head;
- struct list_head dev_list;
- struct list_head opp_list;
-
- struct device_node *np;
- unsigned long clock_latency_ns_max;
- bool shared_opp;
- struct dev_pm_opp *suspend_opp;
-};
+#include "opp.h"
/*
* The root of the list of all devices. All device_opp structures branch off
@@ -144,7 +29,7 @@ struct device_opp {
*/
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
-static DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(dev_opp_list_lock);
#define opp_rcu_lockdep_assert() \
do { \
@@ -196,14 +81,18 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
- * Locking: This function must be called under rcu_read_lock(). device_opp
- * is a RCU protected pointer. This means that device_opp is valid as long
- * as we are under RCU lock.
+ * Locking: For readers, this function must be called under rcu_read_lock().
+ * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * as long as we are under RCU lock.
+ *
+ * For writers, this function must be called with dev_opp_list_lock held.
*/
-static struct device_opp *_find_device_opp(struct device *dev)
+struct device_opp *_find_device_opp(struct device *dev)
{
struct device_opp *dev_opp;
+ opp_rcu_lockdep_assert();
+
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
@@ -217,7 +106,7 @@ static struct device_opp *_find_device_opp(struct device *dev)
}
/**
- * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
*
* Return: voltage in micro volt corresponding to the opp, else
@@ -239,7 +128,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
opp_rcu_lockdep_assert();
tmp_opp = rcu_dereference(opp);
- if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
+ if (IS_ERR_OR_NULL(tmp_opp))
pr_err("%s: Invalid parameters\n", __func__);
else
v = tmp_opp->u_volt;
@@ -579,8 +468,8 @@ static void _remove_list_dev(struct device_list_opp *list_dev,
_kfree_list_dev_rcu);
}
-static struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp)
{
struct device_list_opp *list_dev;
@@ -818,7 +707,7 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
}
/**
- * _opp_add_dynamic() - Allocate a dynamic OPP.
+ * _opp_add_v1() - Allocate an OPP based on v1 bindings.
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
@@ -828,8 +717,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
*
- * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
- * freed by of_free_opp_table.
+ * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
+ * and freed by dev_pm_opp_of_remove_table.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -844,8 +733,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
-static int _opp_add_dynamic(struct device *dev, unsigned long freq,
- long u_volt, bool dynamic)
+static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+ bool dynamic)
{
struct device_opp *dev_opp;
struct dev_pm_opp *new_opp;
@@ -887,9 +776,10 @@ unlock:
}
/* TODO: Support multiple regulators */
-static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
{
u32 microvolt[3] = {0};
+ u32 val;
int count, ret;
/* Missing property isn't a problem, but an invalid entry is */
@@ -922,6 +812,9 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
opp->u_volt_min = microvolt[1];
opp->u_volt_max = microvolt[2];
+ if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ opp->u_amp = val;
+
return 0;
}
@@ -986,13 +879,10 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_get_microvolt(new_opp, dev);
+ ret = opp_parse_supplies(new_opp, dev);
if (ret)
goto free_opp;
- if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
- new_opp->u_amp = val;
-
ret = _opp_add(dev, new_opp, dev_opp);
if (ret)
goto free_opp;
@@ -1056,7 +946,7 @@ unlock:
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
- return _opp_add_dynamic(dev, freq, u_volt, true);
+ return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
@@ -1220,7 +1110,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
#ifdef CONFIG_OF
/**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ * entries
* @dev: device pointer used to lookup device OPPs.
*
* Free OPPs created using static entries present in DT.
@@ -1231,7 +1122,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-void of_free_opp_table(struct device *dev)
+void dev_pm_opp_of_remove_table(struct device *dev)
{
struct device_opp *dev_opp;
struct dev_pm_opp *opp, *tmp;
@@ -1266,92 +1157,39 @@ void of_free_opp_table(struct device *dev)
unlock:
mutex_unlock(&dev_opp_list_lock);
}
-EXPORT_SYMBOL_GPL(of_free_opp_table);
-
-void of_cpumask_free_opp_table(cpumask_var_t cpumask)
-{
- struct device *cpu_dev;
- int cpu;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- of_free_opp_table(cpu_dev);
- }
-}
-EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
-
-/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
-static struct device_node *
-_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
-{
- struct device_node *opp_np;
-
- opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
- if (!opp_np) {
- dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
- __func__, prop->name);
- return ERR_PTR(-EINVAL);
- }
-
- return opp_np;
-}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device. Caller must do of_node_put() */
-static struct device_node *_of_get_opp_desc_node(struct device *dev)
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
{
- const struct property *prop;
-
- prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
- if (!prop)
- return ERR_PTR(-ENODEV);
- if (!prop->value)
- return ERR_PTR(-ENODATA);
-
/*
* TODO: Support for multiple OPP tables.
*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
- if (prop->length != sizeof(__be32)) {
- dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- return _of_get_opp_desc_node_from_prop(dev, prop);
+ return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
/* Initializes OPP tables based on new bindings */
-static int _of_init_opp_table_v2(struct device *dev,
- const struct property *prop)
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
- struct device_node *opp_np, *np;
+ struct device_node *np;
struct device_opp *dev_opp;
int ret = 0, count = 0;
- if (!prop->value)
- return -ENODATA;
-
- /* Get opp node */
- opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
- if (IS_ERR(opp_np))
- return PTR_ERR(opp_np);
+ mutex_lock(&dev_opp_list_lock);
dev_opp = _managed_opp(opp_np);
if (dev_opp) {
/* OPPs are already managed */
if (!_add_list_dev(dev, dev_opp))
ret = -ENOMEM;
- goto put_opp_np;
+ mutex_unlock(&dev_opp_list_lock);
+ return ret;
}
+ mutex_unlock(&dev_opp_list_lock);
/* We have opp-list node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
@@ -1366,33 +1204,33 @@ static int _of_init_opp_table_v2(struct device *dev,
}
/* There should be one of more OPP defined */
- if (WARN_ON(!count)) {
- ret = -ENOENT;
- goto put_opp_np;
- }
+ if (WARN_ON(!count))
+ return -ENOENT;
+
+ mutex_lock(&dev_opp_list_lock);
dev_opp = _find_device_opp(dev);
if (WARN_ON(IS_ERR(dev_opp))) {
ret = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
goto free_table;
}
dev_opp->np = opp_np;
dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
- of_node_put(opp_np);
+ mutex_unlock(&dev_opp_list_lock);
+
return 0;
free_table:
- of_free_opp_table(dev);
-put_opp_np:
- of_node_put(opp_np);
+ dev_pm_opp_of_remove_table(dev);
return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_init_opp_table_v1(struct device *dev)
+static int _of_add_opp_table_v1(struct device *dev)
{
const struct property *prop;
const __be32 *val;
@@ -1419,7 +1257,7 @@ static int _of_init_opp_table_v1(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_dynamic(dev, freq, volt, false))
+ if (_opp_add_v1(dev, freq, volt, false))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
nr -= 2;
@@ -1429,7 +1267,7 @@ static int _of_init_opp_table_v1(struct device *dev)
}
/**
- * of_init_opp_table() - Initialize opp table from device tree
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup device OPPs.
*
* Register the initial OPP table with the OPP library for given device.
@@ -1451,153 +1289,28 @@ static int _of_init_opp_table_v1(struct device *dev)
* -ENODATA when empty 'operating-points' property is found
* -EINVAL when invalid entries are found in opp-v2 table
*/
-int of_init_opp_table(struct device *dev)
+int dev_pm_opp_of_add_table(struct device *dev)
{
- const struct property *prop;
+ struct device_node *opp_np;
+ int ret;
/*
* OPPs have two version of bindings now. The older one is deprecated,
* try for the new binding first.
*/
- prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
- if (!prop) {
+ opp_np = _of_get_opp_desc_node(dev);
+ if (!opp_np) {
/*
* Try old-deprecated bindings for backward compatibility with
* older dtbs.
*/
- return _of_init_opp_table_v1(dev);
+ return _of_add_opp_table_v1(dev);
}
- return _of_init_opp_table_v2(dev, prop);
-}
-EXPORT_SYMBOL_GPL(of_init_opp_table);
-
-int of_cpumask_init_opp_table(cpumask_var_t cpumask)
-{
- struct device *cpu_dev;
- int cpu, ret = 0;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- ret = of_init_opp_table(cpu_dev);
- if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
-
- /* Free all other OPPs */
- of_cpumask_free_opp_table(cpumask);
- break;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
-
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
-{
- struct device_list_opp *list_dev;
- struct device_opp *dev_opp;
- struct device *dev;
- int cpu, ret = 0;
-
- rcu_read_lock();
-
- dev_opp = _find_device_opp(cpu_dev);
- if (IS_ERR(dev_opp)) {
- ret = -EINVAL;
- goto out_rcu_read_unlock;
- }
-
- for_each_cpu(cpu, cpumask) {
- if (cpu == cpu_dev->id)
- continue;
-
- dev = get_cpu_device(cpu);
- if (!dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- continue;
- }
-
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
- __func__, cpu);
- continue;
- }
- }
-out_rcu_read_unlock:
- rcu_read_unlock();
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
-
-/*
- * Works only for OPP v2 bindings.
- *
- * cpumask should be already set to mask of cpu_dev->id.
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
- */
-int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
-{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
- int cpu, ret = 0;
-
- /* Get OPP descriptor node */
- np = _of_get_opp_desc_node(cpu_dev);
- if (IS_ERR(np)) {
- dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
- PTR_ERR(np));
- return -ENOENT;
- }
-
- /* OPPs are shared ? */
- if (!of_property_read_bool(np, "opp-shared"))
- goto put_cpu_node;
-
- for_each_possible_cpu(cpu) {
- if (cpu == cpu_dev->id)
- continue;
-
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- ret = -ENODEV;
- goto put_cpu_node;
- }
-
- /* Get OPP descriptor node */
- tmp_np = _of_get_opp_desc_node(tcpu_dev);
- if (IS_ERR(tmp_np)) {
- dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
- __func__, PTR_ERR(tmp_np));
- ret = PTR_ERR(tmp_np);
- goto put_cpu_node;
- }
-
- /* CPUs are sharing opp node */
- if (np == tmp_np)
- cpumask_set_cpu(cpu, cpumask);
-
- of_node_put(tmp_np);
- }
+ ret = _of_add_opp_table_v2(dev, opp_np);
+ of_node_put(opp_np);
-put_cpu_node:
- of_node_put(np);
return ret;
}
-EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
#endif
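
With the of_* entry points renamed, a consumer now calls dev_pm_opp_of_add_table() and dev_pm_opp_of_remove_table(). A hedged sketch (foo_* is hypothetical; lookups still run under rcu_read_lock() in this version of the API):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int foo_init_opps(struct device *dev)
{
        struct dev_pm_opp *opp;
        unsigned long freq = 0;
        int ret;

        /* Parse "operating-points-v2" (or the legacy binding) from DT. */
        ret = dev_pm_opp_of_add_table(dev);
        if (ret)
                return ret;

        /* Example lookup: lowest OPP at or above 0 Hz. */
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, &freq);
        if (!IS_ERR(opp))
                dev_info(dev, "lowest OPP: %lu Hz\n", freq);
        rcu_read_unlock();

        return 0;
}

static void foo_remove_opps(struct device *dev)
{
        dev_pm_opp_of_remove_table(dev);
}
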
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
new file mode 100644
index 000000000000..7b445e88a0d5
--- /dev/null
+++ b/drivers/base/power/opp/cpu.c
@@ -0,0 +1,270 @@
+/*
+ * Generic OPP helper interface for CPU device
+ *
+ * Copyright (C) 2009-2014 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "opp.h"
+
+#ifdef CONFIG_CPU_FREQ
+
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device - this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal data
+ * structures, we use RCU read lock inside this function. As a result, users of
+ * this function do not need to use explicit locks when invoking it.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = NULL;
+ int i, max_opps, ret = 0;
+ unsigned long rate;
+
+ rcu_read_lock();
+
+ max_opps = dev_pm_opp_get_opp_count(dev);
+ if (max_opps <= 0) {
+ ret = max_opps ? max_opps : -ENODATA;
+ goto out;
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+ /* find next rate */
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = rate / 1000;
+
+ /* Is Boost/turbo opp ? */
+ if (dev_pm_opp_is_turbo(opp))
+ freq_table[i].flags = CPUFREQ_BOOST_FREQ;
+ }
+
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+out:
+ rcu_read_unlock();
+ if (ret)
+ kfree(freq_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
+#endif /* CONFIG_CPU_FREQ */
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_list_opp *list_dev;
+ struct device_opp *dev_opp;
+ struct device *dev;
+ int cpu, ret = 0;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = _find_device_opp(cpu_dev);
+ if (IS_ERR(dev_opp)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+ }
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
+
+#ifdef CONFIG_OF
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ dev_pm_opp_of_remove_table(cpu_dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
+
+ /* Free all other OPPs */
+ dev_pm_opp_of_cpumask_remove_table(cpumask);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should be already set to mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_node *np, *tmp_np;
+ struct device *tcpu_dev;
+ int cpu, ret = 0;
+
+ /* Get OPP descriptor node */
+ np = _of_get_opp_desc_node(cpu_dev);
+ if (!np) {
+ dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ return -ENOENT;
+ }
+
+ /* OPPs are shared ? */
+ if (!of_property_read_bool(np, "opp-shared"))
+ goto put_cpu_node;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ ret = -ENODEV;
+ goto put_cpu_node;
+ }
+
+ /* Get OPP descriptor node */
+ tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ if (!tmp_np) {
+ dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ __func__);
+ ret = -ENOENT;
+ goto put_cpu_node;
+ }
+
+ /* CPUs are sharing opp node */
+ if (np == tmp_np)
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(tmp_np);
+ }
+
+put_cpu_node:
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
+#endif
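
A sketch tying the relocated CPU helpers together for CPU0 (hypothetical foo_* wrappers; error handling kept minimal):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/errno.h>
#include <linux/pm_opp.h>

static int foo_build_cpu0_table(struct cpufreq_frequency_table **table)
{
        struct device *cpu_dev = get_cpu_device(0);
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        ret = dev_pm_opp_of_add_table(cpu_dev);
        if (ret)
                return ret;

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, table);
        if (ret)
                dev_pm_opp_of_remove_table(cpu_dev);

        return ret;
}

static void foo_free_cpu0_table(struct cpufreq_frequency_table **table)
{
        struct device *cpu_dev = get_cpu_device(0);

        dev_pm_opp_free_cpufreq_table(cpu_dev, table);
        if (cpu_dev)
                dev_pm_opp_of_remove_table(cpu_dev);
}
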
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
new file mode 100644
index 000000000000..7366b2aa8997
--- /dev/null
+++ b/drivers/base/power/opp/opp.h
@@ -0,0 +1,146 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVER_OPP_H__
+#define __DRIVER_OPP_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pm_opp.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+/* Lock to allow exclusive modification to the device and opp lists */
+extern struct mutex dev_opp_list_lock;
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ * |- device 1 (represents voltage domain 1)
+ * | |- opp 1 (availability, freq, voltage)
+ * | |- opp 2 ..
+ * ... ...
+ * | `- opp n ..
+ * |- device 2 (represents the next voltage domain)
+ * ...
+ * `- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct dev_pm_opp - Generic OPP description structure
+ * @node: opp list node. The nodes are maintained throughout the lifetime
+ * of boot. It is expected only an optimal set of OPPs are
+ * added to the library by the SoC framework.
+ * RCU usage: opp list is traversed with RCU locks. node
+ * modification is possible realtime, hence the modifications
+ * are protected by the dev_opp_list_lock for integrity.
+ * IMPORTANT: the opp nodes should be maintained in increasing
+ * order.
+ * @dynamic: not-created from static DT entries.
+ * @available: true/false - marks if this OPP as available or not
+ * @turbo: true if turbo (boost) OPP
+ * @rate: Frequency in hertz
+ * @u_volt: Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp: Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ * frequency from any other OPP's frequency.
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @np: OPP's device node.
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct dev_pm_opp {
+ struct list_head node;
+
+ bool available;
+ bool dynamic;
+ bool turbo;
+ unsigned long rate;
+
+ unsigned long u_volt;
+ unsigned long u_volt_min;
+ unsigned long u_volt_max;
+ unsigned long u_amp;
+ unsigned long clock_latency_ns;
+
+ struct device_opp *dev_opp;
+ struct rcu_head rcu_head;
+
+ struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node: list node
+ * @dev: device to which the struct object belongs
+ * @rcu_head: RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+ struct list_head node;
+ const struct device *dev;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node: list node - contains the devices with OPPs that
+ * have been registered. Nodes once added are not modified in this
+ * list.
+ * RCU usage: nodes are not modified in the list of device_opp,
+ * however addition is possible and is secured by dev_opp_list_lock
+ * @srcu_head: notifier head to notify the OPP availability changes.
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @dev_list: list of devices that share these OPPs
+ * @opp_list: list of opps
+ * @np: struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
+ *
+ * This is an internal data structure maintaining the link to opps attached to
+ * a device. This structure is not meant to be shared to users as it is
+ * meant for book keeping and private to OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
+ */
+struct device_opp {
+ struct list_head node;
+
+ struct srcu_notifier_head srcu_head;
+ struct rcu_head rcu_head;
+ struct list_head dev_list;
+ struct list_head opp_list;
+
+ struct device_node *np;
+ unsigned long clock_latency_ns_max;
+ bool shared_opp;
+ struct dev_pm_opp *suspend_opp;
+};
+
+/* Routines internal to opp core */
+struct device_opp *_find_device_opp(struct device *dev);
+struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp);
+struct device_node *_of_get_opp_desc_node(struct device *dev);
+
+#endif /* __DRIVER_OPP_H__ */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;
+ if (irq < 0)
+ return -EINVAL;
+
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
struct wake_irq *wirq;
int err;
+ if (irq < 0)
+ return -EINVAL;
+
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
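
The new guard matters because platform_get_irq() can return a negative errno that drivers sometimes pass through unchecked. A hypothetical probe sketch:

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
        int irq, ret;

        /* platform_get_irq() may return a negative errno ... */
        irq = platform_get_irq(pdev, 0);

        device_init_wakeup(&pdev->dev, true);

        /* ... which dev_pm_set_wake_irq() now rejects with -EINVAL. */
        ret = dev_pm_set_wake_irq(&pdev->dev, irq);
        if (ret)
                dev_warn(&pdev->dev, "no wake IRQ: %d\n", ret);

        return 0;
}
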
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 51f15bc15774..a1e0b9ab847a 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -25,6 +25,9 @@
*/
bool events_check_enabled __read_mostly;
+/* First wakeup IRQ seen by the kernel in the last cycle. */
+unsigned int pm_wakeup_irq __read_mostly;
+
/* If set and the system is suspending, terminate the suspend. */
static bool pm_abort_suspend __read_mostly;
@@ -91,7 +94,7 @@ struct wakeup_source *wakeup_source_create(const char *name)
if (!ws)
return NULL;
- wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
+ wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
@@ -154,7 +157,7 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_drop(ws);
wakeup_source_record(ws);
- kfree(ws->name);
+ kfree_const(ws->name);
kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
@@ -868,6 +871,15 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_wakeup_clear(void)
{
pm_abort_suspend = false;
+ pm_wakeup_irq = 0;
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+ if (pm_wakeup_irq == 0) {
+ pm_wakeup_irq = irq_number;
+ pm_system_wakeup();
+ }
}
/**
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 2d75366c61e0..1325ff225cc4 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -134,7 +134,7 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
if (is_of_node(fwnode))
return of_property_read_bool(to_of_node(fwnode), propname);
else if (is_acpi_node(fwnode))
- return !acpi_dev_prop_get(to_acpi_node(fwnode), propname, NULL);
+ return !acpi_node_prop_get(fwnode, propname, NULL);
return !!pset_prop_get(to_pset(fwnode), propname);
}
@@ -287,6 +287,28 @@ int device_property_read_string(struct device *dev, const char *propname,
}
EXPORT_SYMBOL_GPL(device_property_read_string);
+/**
+ * device_property_match_string - find a string in an array and return index
+ * @dev: Device to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and if it is found return the
+ * index back.
+ *
+ * Return: index of the matching string (starting at %0) on success,
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of strings,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_match_string(struct device *dev, const char *propname,
+ const char *string)
+{
+ return fwnode_property_match_string(dev_fwnode(dev), propname, string);
+}
+EXPORT_SYMBOL_GPL(device_property_match_string);
+
#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \
(val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \
: of_property_count_elems_of_size((node), (propname), sizeof(type))
@@ -298,8 +320,8 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
_ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \
_type_, _val_, _nval_); \
else if (is_acpi_node(_fwnode_)) \
- _ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
- _proptype_, _val_, _nval_); \
+ _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \
+ _val_, _nval_); \
else if (is_pset(_fwnode_)) \
_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
_proptype_, _val_, _nval_); \
@@ -440,8 +462,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
propname, val, nval) :
of_property_count_strings(to_of_node(fwnode), propname);
else if (is_acpi_node(fwnode))
- return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
- DEV_PROP_STRING, val, nval);
+ return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
+ val, nval);
else if (is_pset(fwnode))
return pset_prop_read_array(to_pset(fwnode), propname,
DEV_PROP_STRING, val, nval);
@@ -470,8 +492,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
if (is_of_node(fwnode))
return of_property_read_string(to_of_node(fwnode), propname, val);
else if (is_acpi_node(fwnode))
- return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
- DEV_PROP_STRING, val, 1);
+ return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
+ val, 1);
return pset_prop_read_array(to_pset(fwnode), propname,
DEV_PROP_STRING, val, 1);
@@ -479,6 +501,52 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_read_string);
/**
+ * fwnode_property_match_string - find a string in an array and return index
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and if it is found return the
+ * index back.
+ *
+ * Return: index of the matching string (starting at %0) on success,
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of strings,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_match_string(struct fwnode_handle *fwnode,
+ const char *propname, const char *string)
+{
+ const char **values;
+ int nval, ret, i;
+
+ nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+ if (nval < 0)
+ return nval;
+
+ values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_string_array(fwnode, propname, values, nval);
+ if (ret < 0)
+ goto out;
+
+ ret = -ENODATA;
+ for (i = 0; i < nval; i++) {
+ if (!strcmp(values[i], string)) {
+ ret = i;
+ break;
+ }
+ }
+out:
+ kfree(values);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+
+/**
* device_get_next_child_node - Return the next child node handle for a device
* @dev: Device to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
@@ -493,11 +561,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
if (node)
return &node->fwnode;
} else if (IS_ENABLED(CONFIG_ACPI)) {
- struct acpi_device *node;
-
- node = acpi_get_next_child(dev, to_acpi_node(child));
- if (node)
- return acpi_fwnode_handle(node);
+ return acpi_get_next_subnode(dev, child);
}
return NULL;
}
@@ -534,18 +598,34 @@ unsigned int device_get_child_node_count(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_child_node_count);
-bool device_dma_is_coherent(struct device *dev)
+bool device_dma_supported(struct device *dev)
{
- bool coherent = false;
-
+ /* For DT, this is always supported.
+ * For ACPI, this depends on CCA, which
+ * is determined by the acpi_dma_supported().
+ */
if (IS_ENABLED(CONFIG_OF) && dev->of_node)
- coherent = of_dma_is_coherent(dev->of_node);
- else
- acpi_check_dma(ACPI_COMPANION(dev), &coherent);
+ return true;
+
+ return acpi_dma_supported(ACPI_COMPANION(dev));
+}
+EXPORT_SYMBOL_GPL(device_dma_supported);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev)
+{
+ enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
- return coherent;
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ if (of_dma_is_coherent(dev->of_node))
+ attr = DEV_DMA_COHERENT;
+ else
+ attr = DEV_DMA_NON_COHERENT;
+ } else
+ attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
+
+ return attr;
}
-EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+EXPORT_SYMBOL_GPL(device_get_dma_attr);
/**
* device_get_phy_mode - Get phy mode for given device
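
A consumer sketch for the new property helpers (the "foo,mode-names" property and foo_* function are hypothetical):

#include <linux/device.h>
#include <linux/property.h>

static int foo_pick_mode(struct device *dev)
{
        enum dev_dma_attr attr;
        int idx;

        /* Index of "fast" within a hypothetical "foo,mode-names" array. */
        idx = device_property_match_string(dev, "foo,mode-names", "fast");
        if (idx < 0)
                return idx;

        /* DT: derived from "dma-coherent"; ACPI: derived from _CCA. */
        attr = device_get_dma_attr(dev);
        if (attr == DEV_DMA_NOT_SUPPORTED)
                dev_warn(dev, "DMA not supported\n");

        return idx;
}
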
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index cc557886ab23..3df977054781 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
+ gfp_t alloc_flags;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
@@ -98,6 +99,8 @@ struct regmap {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
bool defer_caching;
@@ -122,9 +125,9 @@ struct regmap {
unsigned int num_reg_defaults_raw;
/* if set, only the cache is modified not the HW */
- u32 cache_only;
+ bool cache_only;
/* if set, only the HW is modified not the cache */
- u32 cache_bypass;
+ bool cache_bypass;
/* if set, remember to free reg_defaults_raw */
bool cache_free;
@@ -132,7 +135,7 @@ struct regmap {
const void *reg_defaults_raw;
void *cache;
/* if set, the cache contains newer data than the HW */
- u32 cache_dirty;
+ bool cache_dirty;
/* if set, the HW registers are known to match map->reg_defaults */
bool no_sync_defaults;
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 2d53f6f138e1..736e0d378567 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -355,9 +355,9 @@ static int regcache_lzo_sync(struct regmap *map, unsigned int min,
if (ret > 0 && val == map->reg_defaults[ret].def)
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, i, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret)
return ret;
dev_dbg(map->dev, "Synced register %#x, value %#x\n",
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 6f8a13ec32a4..4c07802986b2 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -54,11 +54,11 @@ static int regcache_hw_init(struct regmap *map)
return -ENOMEM;
if (!map->reg_defaults_raw) {
- u32 cache_bypass = map->cache_bypass;
+ bool cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
/* Bypass cache accesses until the data has been read from HW */
- map->cache_bypass = 1;
+ map->cache_bypass = true;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf) {
ret = -ENOMEM;
@@ -285,9 +285,9 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
if (!regcache_reg_needs_sync(map, reg, val))
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, reg, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
reg, ret);
@@ -315,7 +315,7 @@ int regcache_sync(struct regmap *map)
int ret = 0;
unsigned int i;
const char *name;
- unsigned int bypass;
+ bool bypass;
BUG_ON(!map->cache_ops);
@@ -333,7 +333,7 @@ int regcache_sync(struct regmap *map)
map->async = true;
/* Apply any patch first */
- map->cache_bypass = 1;
+ map->cache_bypass = true;
for (i = 0; i < map->patch_regs; i++) {
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
@@ -342,7 +342,7 @@ int regcache_sync(struct regmap *map)
goto out;
}
}
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, 0, map->max_register);
@@ -384,7 +384,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
{
int ret = 0;
const char *name;
- unsigned int bypass;
+ bool bypass;
BUG_ON(!map->cache_ops);
@@ -637,11 +637,11 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
if (!regcache_reg_needs_sync(map, regtmp, val))
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, regtmp, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret != 0) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
regtmp, ret);
@@ -668,14 +668,14 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
count * val_bytes, count, base, cur - map->reg_stride);
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_raw_write(map, base, *data, count * val_bytes);
if (ret)
dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
base, cur - map->reg_stride, ret);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
*data = NULL;
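The cache_bypass/cache_only flags toggled above are also exposed to drivers through the regcache API. A minimal sketch, assuming a hypothetical example_chip that owns a struct regmap, of the usual suspend/resume pattern:

#include <linux/regmap.h>

struct example_chip {
	struct regmap *regmap;
};

static int example_chip_suspend(struct example_chip *chip)
{
	/* Keep register writes in the cache while the device is powered off. */
	regcache_cache_only(chip->regmap, true);
	regcache_mark_dirty(chip->regmap);
	return 0;
}

static int example_chip_resume(struct example_chip *chip)
{
	regcache_cache_only(chip->regmap, false);
	/* Replay the cached register values to the re-powered hardware. */
	return regcache_sync(chip->regmap);
}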
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 4c55cfbad19e..3f0a7e262d69 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -30,7 +30,7 @@ static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
-static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+static size_t regmap_calc_reg_len(int max_val)
{
return snprintf(NULL, 0, "%x", max_val);
}
@@ -173,8 +173,7 @@ static inline void regmap_calc_tot_len(struct regmap *map,
{
/* Calculate the length of a fixed format */
if (!map->debugfs_tot_len) {
- map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
- buf, count);
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
map->debugfs_val_len = 2 * map->format.val_bytes;
map->debugfs_tot_len = map->debugfs_reg_len +
map->debugfs_val_len + 3; /* : \n */
@@ -338,6 +337,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
char *buf;
char *entry;
int ret;
+ unsigned entry_len;
if (*ppos < 0 || !count)
return -EINVAL;
@@ -365,18 +365,15 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
p = 0;
mutex_lock(&map->cache_lock);
list_for_each_entry(c, &map->debugfs_off_cache, list) {
- snprintf(entry, PAGE_SIZE, "%x-%x",
- c->base_reg, c->max_reg);
+ entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
+ c->base_reg, c->max_reg);
if (p >= *ppos) {
- if (buf_pos + 1 + strlen(entry) > count)
+ if (buf_pos + entry_len > count)
break;
- snprintf(buf + buf_pos, count - buf_pos,
- "%s", entry);
- buf_pos += strlen(entry);
- buf[buf_pos] = '\n';
- buf_pos++;
+ memcpy(buf + buf_pos, entry, entry_len);
+ buf_pos += entry_len;
}
- p += strlen(entry) + 1;
+ p += entry_len;
}
mutex_unlock(&map->cache_lock);
@@ -420,7 +417,7 @@ static ssize_t regmap_access_read_file(struct file *file,
return -ENOMEM;
/* Calculate the length of a fixed format */
- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
+ reg_len = regmap_calc_reg_len(map->max_register);
tot_len = reg_len + 10; /* ': R W V P\n' */
for (i = 0; i <= map->max_register; i += map->reg_stride) {
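The debugfs change above drops the second formatting pass: snprintf()'s return value is reused as the entry length (now including the trailing newline) and the entry is memcpy()'d into the output buffer. A standalone C sketch of the same pattern, with invented names, for reference:

#include <stdio.h>
#include <string.h>

/* Append "base-max\n" to buf; return bytes written, or 0 if it won't fit. */
static size_t emit_range(char *buf, size_t space, unsigned int base, unsigned int max)
{
	char entry[32];
	int entry_len;

	entry_len = snprintf(entry, sizeof(entry), "%x-%x\n", base, max);
	if (entry_len < 0 || (size_t)entry_len >= sizeof(entry) ||
	    (size_t)entry_len > space)
		return 0;

	memcpy(buf, entry, entry_len);
	return entry_len;
}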
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 38d1f72d869c..8d16db533527 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -63,6 +63,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
struct regmap *map = d->map;
int i, ret;
u32 reg;
+ u32 unmask_offset;
if (d->chip->runtime_pm) {
ret = pm_runtime_get_sync(map->dev);
@@ -79,12 +80,28 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < d->chip->num_regs; i++) {
reg = d->chip->mask_base +
(i * map->reg_stride * d->irq_reg_stride);
- if (d->chip->mask_invert)
+ if (d->chip->mask_invert) {
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]);
- else
+ } else if (d->chip->unmask_base) {
+ /* set mask with mask_base register */
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], ~d->mask_buf[i]);
+ if (ret < 0)
+ dev_err(d->map->dev,
+ "Failed to sync unmasks in %x\n",
+ reg);
+ unmask_offset = d->chip->unmask_base -
+ d->chip->mask_base;
+ /* clear mask with unmask_base register */
+ ret = regmap_update_bits(d->map,
+ reg + unmask_offset,
+ d->mask_buf_def[i],
+ d->mask_buf[i]);
+ } else {
ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], d->mask_buf[i]);
+ }
if (ret != 0)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
@@ -116,7 +133,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
reg = d->chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_write(map, reg, d->mask_buf[i]);
+ /* some chips ack by writing 0 */
+ if (d->chip->ack_invert)
+ ret = regmap_write(map, reg, ~d->mask_buf[i]);
+ else
+ ret = regmap_write(map, reg, d->mask_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
@@ -339,6 +360,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int i;
int ret = -ENOMEM;
u32 reg;
+ u32 unmask_offset;
if (chip->num_regs <= 0)
return -EINVAL;
@@ -420,7 +442,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (chip->mask_invert)
ret = regmap_update_bits(map, reg,
d->mask_buf[i], ~d->mask_buf[i]);
- else
+ else if (d->chip->unmask_base) {
+ unmask_offset = d->chip->unmask_base -
+ d->chip->mask_base;
+ ret = regmap_update_bits(d->map,
+ reg + unmask_offset,
+ d->mask_buf[i],
+ d->mask_buf[i]);
+ } else
ret = regmap_update_bits(map, reg,
d->mask_buf[i], d->mask_buf[i]);
if (ret != 0) {
@@ -445,7 +474,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_write(map, reg,
+ if (chip->ack_invert)
+ ret = regmap_write(map, reg,
+ ~(d->status_buf[i] & d->mask_buf[i]));
+ else
+ ret = regmap_write(map, reg,
d->status_buf[i] & d->mask_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
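For reference, a hypothetical chip description exercising both additions above: a separate unmask register bank (unmask_base) and acknowledgement by writing zeroes (ack_invert). All register addresses and names are invented for this sketch:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },
	{ .reg_offset = 0, .mask = BIT(1) },
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x10,
	.mask_base	= 0x20,		/* write 1 to mask an interrupt */
	.unmask_base	= 0x30,		/* write 1 to unmask an interrupt */
	.ack_base	= 0x40,
	.ack_invert	= true,		/* acked by writing 0, as handled above */
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};

Such a chip would then be registered in the usual way with regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0, &example_irq_chip, &data).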
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index afaf56200674..4ac63c0e50c7 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -561,6 +561,16 @@ struct regmap *__regmap_init(struct device *dev,
}
map->lock_arg = map;
}
+
+ /*
+ * When we write in fast-paths with regmap_bulk_write(), don't allocate
+ * scratch buffers with sleeping allocations.
+ */
+ if ((bus && bus->fast_io) || config->fast_io)
+ map->alloc_flags = GFP_ATOMIC;
+ else
+ map->alloc_flags = GFP_KERNEL;
+
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
@@ -619,6 +629,7 @@ struct regmap *__regmap_init(struct device *dev,
goto skip_format_initialization;
} else {
map->reg_read = _regmap_bus_read;
+ map->reg_update_bits = bus->reg_update_bits;
}
reg_endian = regmap_get_reg_endian(bus, config);
@@ -1786,7 +1797,7 @@ out:
if (!val_count)
return -EINVAL;
- wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
if (!wval) {
dev_err(map->dev, "Error in memory allocation\n");
return -ENOMEM;
@@ -2509,20 +2520,26 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
unsigned int tmp, orig;
- ret = _regmap_read(map, reg, &orig);
- if (ret != 0)
- return ret;
+ if (change)
+ *change = false;
- tmp = orig & ~mask;
- tmp |= val & mask;
-
- if (force_write || (tmp != orig)) {
- ret = _regmap_write(map, reg, tmp);
- if (change)
+ if (regmap_volatile(map, reg) && map->reg_update_bits) {
+ ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+ if (ret == 0 && change)
*change = true;
} else {
- if (change)
- *change = false;
+ ret = _regmap_read(map, reg, &orig);
+ if (ret != 0)
+ return ret;
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (force_write || (tmp != orig)) {
+ ret = _regmap_write(map, reg, tmp);
+ if (ret == 0 && change)
+ *change = true;
+ }
}
return ret;
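A sketch of a bus definition that hits both new regmap.c paths above: fast_io makes the core take GFP_ATOMIC for the regmap_bulk_write() scratch buffer, and reg_update_bits lets volatile registers be updated in a single bus operation instead of a read-modify-write. The example_* callbacks are stubs invented for illustration:

#include <linux/regmap.h>

static int example_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	*val = 0;	/* read from the hardware here */
	return 0;
}

static int example_reg_write(void *context, unsigned int reg, unsigned int val)
{
	return 0;	/* write to the hardware here */
}

static int example_reg_update_bits(void *context, unsigned int reg,
				   unsigned int mask, unsigned int val)
{
	/* The hardware applies the mask itself; no read back is needed. */
	return 0;
}

static const struct regmap_bus example_bus = {
	.fast_io	  = true,
	.reg_read	  = example_reg_read,
	.reg_write	  = example_reg_write,
	.reg_update_bits  = example_reg_update_bits,
};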
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 39fca01c8fa1..75b98aad6faf 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -16,7 +16,6 @@
#include <linux/err.h>
static DEFINE_IDA(soc_ida);
-static DEFINE_SPINLOCK(soc_lock);
static ssize_t soc_info_get(struct device *dev,
struct device_attribute *attr,
@@ -122,20 +121,10 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
}
/* Fetch a unique (reclaimable) SOC ID. */
- do {
- if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
- ret = -ENOMEM;
- goto out2;
- }
-
- spin_lock(&soc_lock);
- ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
- spin_unlock(&soc_lock);
-
- } while (ret == -EAGAIN);
-
- if (ret)
+ ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out2;
+ soc_dev->soc_dev_num = ret;
soc_dev->attr = soc_dev_attr;
soc_dev->dev.bus = &soc_bus_type;
@@ -151,7 +140,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
return soc_dev;
out3:
- ida_remove(&soc_ida, soc_dev->soc_dev_num);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
out2:
kfree(soc_dev);
out1:
@@ -161,7 +150,7 @@ out1:
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
- ida_remove(&soc_ida, soc_dev->soc_dev_num);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
device_unregister(&soc_dev->dev);
}
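The soc.c hunks above replace the open-coded ida_pre_get()/ida_get_new() retry loop (and its spinlock) with the self-locking ida_simple_* helpers. A minimal sketch of that pattern, with an invented example_ida, for reference:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
	/* Returns the allocated ID, or a negative errno such as -ENOMEM. */
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

static void example_free_id(int id)
{
	ida_simple_remove(&example_ida, id);
}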