Diffstat (limited to 'drivers/base')
42 files changed, 2943 insertions, 1260 deletions
diff --git a/drivers/base/base.h b/drivers/base/base.h index 1782f3aa386e..e05db388bd1c 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -131,6 +131,8 @@ extern void device_remove_groups(struct device *dev, extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); +extern void device_block_probing(void); +extern void device_unblock_probing(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/class.c b/drivers/base/class.c index 6e810881e48b..71059e32bebc 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device); * * Note, you will need to drop the reference with put_device() after use. * - * @fn is allowed to do anything including calling back into class + * @match is allowed to do anything including calling back into class * code. There's no locking restriction. */ struct device *class_find_device(struct class *class, struct device *start, diff --git a/drivers/base/component.c b/drivers/base/component.c index f748430bb654..04a1582e80bb 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -18,18 +18,24 @@ #include <linux/mutex.h> #include <linux/slab.h> +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + struct component_match { size_t alloc; size_t num; - struct { - void *data; - int (*fn)(struct device *, void *); - } compare[0]; + struct component_match_array *compare; }; struct master { struct list_head node; - struct list_head components; bool bound; const struct component_master_ops *ops; @@ -39,7 +45,6 @@ struct master { struct component { struct list_head node; - struct list_head master_node; struct master *master; bool bound; @@ -63,48 +68,21 @@ static struct master *__master_find(struct device *dev, return NULL; } -/* Attach an unattached component to a master. */ -static void component_attach_master(struct master *master, struct component *c) -{ - c->master = master; - - list_add_tail(&c->master_node, &master->components); -} - -/* Detach a component from a master. */ -static void component_detach_master(struct master *master, struct component *c) -{ - list_del(&c->master_node); - - c->master = NULL; -} - -/* - * Add a component to a master, finding the component via the compare - * function and compare data. This is safe to call for duplicate matches - * and will not result in the same component being added multiple times. - */ -int component_master_add_child(struct master *master, +static struct component *find_component(struct master *master, int (*compare)(struct device *, void *), void *compare_data) { struct component *c; - int ret = -ENXIO; list_for_each_entry(c, &component_list, node) { if (c->master && c->master != master) continue; - if (compare(c->dev, compare_data)) { - if (!c->master) - component_attach_master(master, c); - ret = 0; - break; - } + if (compare(c->dev, compare_data)) + return c; } - return ret; + return NULL; } -EXPORT_SYMBOL_GPL(component_master_add_child); static int find_components(struct master *master) { @@ -112,39 +90,44 @@ static int find_components(struct master *master) size_t i; int ret = 0; - if (!match) { - /* - * Search the list of components, looking for components that - * belong to this master, and attach them to the master. 
- */ - return master->ops->add_components(master->dev, master); - } - /* * Scan the array of match functions and attach * any components which are found to this master. */ for (i = 0; i < match->num; i++) { - ret = component_master_add_child(master, - match->compare[i].fn, - match->compare[i].data); - if (ret) + struct component_match_array *mc = &match->compare[i]; + struct component *c; + + dev_dbg(master->dev, "Looking for component %zu\n", i); + + if (match->compare[i].component) + continue; + + c = find_component(master, mc->compare, mc->data); + if (!c) { + ret = -ENXIO; break; + } + + dev_dbg(master->dev, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master); + + /* Attach this component to the master */ + match->compare[i].duplicate = !!c->master; + match->compare[i].component = c; + c->master = master; } return ret; } -/* Detach all attached components from this master */ -static void master_remove_components(struct master *master) +/* Detach component from associated master */ +static void remove_component(struct master *master, struct component *c) { - while (!list_empty(&master->components)) { - struct component *c = list_first_entry(&master->components, - struct component, master_node); - - WARN_ON(c->master != master); + size_t i; - component_detach_master(master, c); - } + /* Detach the component from this master. */ + for (i = 0; i < master->match->num; i++) + if (master->match->compare[i].component == c) + master->match->compare[i].component = NULL; } /* @@ -159,44 +142,32 @@ static int try_to_bring_up_master(struct master *master, { int ret; - if (master->bound) - return 0; + dev_dbg(master->dev, "trying to bring up master\n"); - /* - * Search the list of components, looking for components that - * belong to this master, and attach them to the master. 
- */ if (find_components(master)) { - /* Failed to find all components */ - ret = 0; - goto out; + dev_dbg(master->dev, "master has incomplete components\n"); + return 0; } if (component && component->master != master) { - ret = 0; - goto out; + dev_dbg(master->dev, "master is not for this component (%s)\n", + dev_name(component->dev)); + return 0; } - if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) + return -ENOMEM; /* Found all components */ ret = master->ops->bind(master->dev); if (ret < 0) { devres_release_group(master->dev, NULL); dev_info(master->dev, "master bind failed: %d\n", ret); - goto out; + return ret; } master->bound = true; return 1; - -out: - master_remove_components(master); - - return ret; } static int try_to_bring_up_masters(struct component *component) @@ -205,9 +176,11 @@ static int try_to_bring_up_masters(struct component *component) int ret = 0; list_for_each_entry(m, &masters, node) { - ret = try_to_bring_up_master(m, component); - if (ret != 0) - break; + if (!m->bound) { + ret = try_to_bring_up_master(m, component); + if (ret != 0) + break; + } } return ret; @@ -220,45 +193,59 @@ static void take_down_master(struct master *master) devres_release_group(master->dev, NULL); master->bound = false; } +} + +static void component_match_release(struct device *master, + struct component_match *match) +{ + unsigned int i; + + for (i = 0; i < match->num; i++) { + struct component_match_array *mc = &match->compare[i]; + + if (mc->release) + mc->release(master, mc->data); + } - master_remove_components(master); + kfree(match->compare); } -static size_t component_match_size(size_t num) +static void devm_component_match_release(struct device *dev, void *res) { - return offsetof(struct component_match, compare[num]); + component_match_release(dev, res); } -static struct component_match *component_match_realloc(struct device *dev, +static int component_match_realloc(struct device *dev, struct component_match *match, size_t num) { - struct component_match *new; + struct component_match_array *new; - if (match && match->alloc == num) - return match; + if (match->alloc == num) + return 0; - new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL); + new = kmalloc_array(num, sizeof(*new), GFP_KERNEL); if (!new) - return ERR_PTR(-ENOMEM); + return -ENOMEM; - if (match) { - memcpy(new, match, component_match_size(min(match->num, num))); - devm_kfree(dev, match); - } else { - new->num = 0; + if (match->compare) { + memcpy(new, match->compare, sizeof(*new) * + min(match->num, num)); + kfree(match->compare); } + match->compare = new; + match->alloc = num; - new->alloc = num; - - return new; + return 0; } /* - * Add a component to be matched. + * Add a component to be matched, with a release function. * * The match array is first created or extended if necessary. */ -void component_match_add(struct device *dev, struct component_match **matchptr, +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data) { struct component_match *match = *matchptr; @@ -266,22 +253,55 @@ void component_match_add(struct device *dev, struct component_match **matchptr, if (IS_ERR(match)) return; - if (!match || match->num == match->alloc) { - size_t new_size = match ? 
match->alloc + 16 : 15; + if (!match) { + match = devres_alloc(devm_component_match_release, + sizeof(*match), GFP_KERNEL); + if (!match) { + *matchptr = ERR_PTR(-ENOMEM); + return; + } - match = component_match_realloc(dev, match, new_size); + devres_add(master, match); *matchptr = match; + } + + if (match->num == match->alloc) { + size_t new_size = match ? match->alloc + 16 : 15; + int ret; - if (IS_ERR(match)) + ret = component_match_realloc(master, match, new_size); + if (ret) { + *matchptr = ERR_PTR(ret); return; + } } - match->compare[match->num].fn = compare; + match->compare[match->num].compare = compare; + match->compare[match->num].release = release; match->compare[match->num].data = compare_data; + match->compare[match->num].component = NULL; match->num++; } -EXPORT_SYMBOL(component_match_add); +EXPORT_SYMBOL(component_match_add_release); + +static void free_master(struct master *master) +{ + struct component_match *match = master->match; + int i; + + list_del(&master->node); + + if (match) { + for (i = 0; i < match->num; i++) { + struct component *c = match->compare[i].component; + if (c) + c->master = NULL; + } + } + + kfree(master); +} int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, @@ -290,15 +310,10 @@ int component_master_add_with_match(struct device *dev, struct master *master; int ret; - if (ops->add_components && match) - return -EINVAL; - - if (match) { - /* Reallocate the match array for its true size */ - match = component_match_realloc(dev, match, match->num); - if (IS_ERR(match)) - return PTR_ERR(match); - } + /* Reallocate the match array for its true size */ + ret = component_match_realloc(dev, match, match->num); + if (ret) + return ret; master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) @@ -307,7 +322,6 @@ int component_master_add_with_match(struct device *dev, master->dev = dev; master->ops = ops; master->match = match; - INIT_LIST_HEAD(&master->components); /* Add to the list of available masters. */ mutex_lock(&component_mutex); @@ -315,24 +329,15 @@ int component_master_add_with_match(struct device *dev, ret = try_to_bring_up_master(master, NULL); - if (ret < 0) { - /* Delete off the list if we weren't successful */ - list_del(&master->node); - kfree(master); - } + if (ret < 0) + free_master(master); + mutex_unlock(&component_mutex); return ret < 0 ? 
ret : 0; } EXPORT_SYMBOL_GPL(component_master_add_with_match); -int component_master_add(struct device *dev, - const struct component_master_ops *ops) -{ - return component_master_add_with_match(dev, ops, NULL); -} -EXPORT_SYMBOL_GPL(component_master_add); - void component_master_del(struct device *dev, const struct component_master_ops *ops) { @@ -342,9 +347,7 @@ void component_master_del(struct device *dev, master = __master_find(dev, ops); if (master) { take_down_master(master); - - list_del(&master->node); - kfree(master); + free_master(master); } mutex_unlock(&component_mutex); } @@ -366,6 +369,7 @@ void component_unbind_all(struct device *master_dev, void *data) { struct master *master; struct component *c; + size_t i; WARN_ON(!mutex_is_locked(&component_mutex)); @@ -373,8 +377,12 @@ void component_unbind_all(struct device *master_dev, void *data) if (!master) return; - list_for_each_entry_reverse(c, &master->components, master_node) - component_unbind(c, master, data); + /* Unbind components in reverse order */ + for (i = master->match->num; i--; ) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + component_unbind(c, master, data); + } } EXPORT_SYMBOL_GPL(component_unbind_all); @@ -434,6 +442,7 @@ int component_bind_all(struct device *master_dev, void *data) { struct master *master; struct component *c; + size_t i; int ret = 0; WARN_ON(!mutex_is_locked(&component_mutex)); @@ -442,16 +451,21 @@ int component_bind_all(struct device *master_dev, void *data) if (!master) return -EINVAL; - list_for_each_entry(c, &master->components, master_node) { - ret = component_bind(c, master, data); - if (ret) - break; - } + /* Bind components in match order */ + for (i = 0; i < master->match->num; i++) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + ret = component_bind(c, master, data); + if (ret) + break; + } if (ret != 0) { - list_for_each_entry_continue_reverse(c, &master->components, - master_node) - component_unbind(c, master, data); + for (; i--; ) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + component_unbind(c, master, data); + } } return ret; @@ -477,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops) ret = try_to_bring_up_masters(component); if (ret < 0) { + if (component->master) + remove_component(component->master, component); list_del(&component->node); kfree(component); @@ -499,8 +515,10 @@ void component_del(struct device *dev, const struct component_ops *ops) break; } - if (component && component->master) + if (component && component->master) { take_down_master(component->master); + remove_component(component->master, component); + } mutex_unlock(&component_mutex); diff --git a/drivers/base/core.c b/drivers/base/core.c index 334ec7ef1960..0a8bdade53f2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1066,7 +1066,7 @@ int device_add(struct device *dev) dev->kobj.parent = kobj; /* use parent numa_node */ - if (parent) + if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) set_dev_node(dev, dev_to_node(parent)); /* first, register with generic layer. */ @@ -2261,7 +2261,10 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) if (fwnode_is_primary(fn)) fn = fn->secondary; - fwnode->secondary = fn; + if (fn) { + WARN_ON(fwnode->secondary); + fwnode->secondary = fn; + } dev->fwnode = fwnode; } else { dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 91bbb1959d8d..691eeea2f19a 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -200,7 +200,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = { struct cpu_attr { struct device_attribute attr; - const struct cpumask *const * const map; + const struct cpumask *const map; }; static ssize_t show_cpus_attr(struct device *dev, @@ -209,7 +209,7 @@ static ssize_t show_cpus_attr(struct device *dev, { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); - return cpumap_print_to_pagebuf(true, buf, *ca->map); + return cpumap_print_to_pagebuf(true, buf, ca->map); } #define _CPU_ATTR(name, map) \ @@ -217,9 +217,9 @@ static ssize_t show_cpus_attr(struct device *dev, /* Keep in sync with cpu_subsys_attrs */ static struct cpu_attr cpu_attrs[] = { - _CPU_ATTR(online, &cpu_online_mask), - _CPU_ATTR(possible, &cpu_possible_mask), - _CPU_ATTR(present, &cpu_present_mask), + _CPU_ATTR(online, &__cpu_online_mask), + _CPU_ATTR(possible, &__cpu_possible_mask), + _CPU_ATTR(present, &__cpu_present_mask), }; /* diff --git a/drivers/base/dd.c b/drivers/base/dd.c index be0eb4639128..16688f50729c 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -55,6 +55,13 @@ static struct workqueue_struct *deferred_wq; static atomic_t deferred_trigger_count = ATOMIC_INIT(0); /* + * In some cases, like suspend to RAM or hibernation, It might be reasonable + * to prohibit probing of devices as it could be unsafe. + * Once defer_all_probes is true all drivers probes will be forcibly deferred. + */ +static bool defer_all_probes; + +/* * deferred_probe_work_func() - Retry probing devices in the active list. */ static void deferred_probe_work_func(struct work_struct *work) @@ -172,6 +179,30 @@ static void driver_deferred_probe_trigger(void) } /** + * device_block_probing() - Block/defere device's probes + * + * It will disable probing of devices and defer their probes instead. + */ +void device_block_probing(void) +{ + defer_all_probes = true; + /* sync with probes to avoid races. */ + wait_for_device_probe(); +} + +/** + * device_unblock_probing() - Unblock/enable device's probes + * + * It will restore normal behavior and trigger re-probing of deferred + * devices. + */ +void device_unblock_probing(void) +{ + defer_all_probes = false; + driver_deferred_probe_trigger(); +} + +/** * deferred_probe_initcall() - Enable probing of deferred devices * * We don't want to get in the way when the bulk of drivers are getting probed. @@ -192,9 +223,23 @@ static int deferred_probe_initcall(void) } late_initcall(deferred_probe_initcall); +/** + * device_is_bound() - Check if device is bound to a driver + * @dev: device to check + * + * Returns true if passed device has already finished probing successfully + * against a driver. + * + * This function must be called with the device lock held. 
+ */ +bool device_is_bound(struct device *dev) +{ + return dev->p && klist_node_attached(&dev->p->knode_driver); +} + static void driver_bound(struct device *dev) { - if (klist_node_attached(&dev->p->knode_driver)) { + if (device_is_bound(dev)) { printk(KERN_WARNING "%s: device %s already bound\n", __func__, kobject_name(&dev->kobj)); return; @@ -205,6 +250,8 @@ static void driver_bound(struct device *dev) klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); + device_pm_check_callbacks(dev); + /* * Make sure the device is no longer in one of the deferred lists and * kick off retrying all pending devices @@ -268,6 +315,9 @@ int device_bind_driver(struct device *dev) ret = driver_sysfs_add(dev); if (!ret) driver_bound(dev); + else if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DRIVER_NOT_BOUND, dev); return ret; } EXPORT_SYMBOL_GPL(device_bind_driver); @@ -277,9 +327,20 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); static int really_probe(struct device *dev, struct device_driver *drv) { - int ret = 0; + int ret = -EPROBE_DEFER; int local_trigger_count = atomic_read(&deferred_trigger_count); + if (defer_all_probes) { + /* + * Value of defer_all_probes can be set only by + * device_defer_all_probes_enable() which, in turn, will call + * wait_for_device_probe() right after that to avoid any races. + */ + dev_dbg(dev, "Driver %s force probe deferral\n", drv->name); + driver_deferred_probe_add(dev); + return ret; + } + atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", drv->bus->name, __func__, drv->name, dev_name(dev)); @@ -290,7 +351,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) /* If using pinctrl, bind pins now before probing */ ret = pinctrl_bind_pins(dev); if (ret) - goto probe_failed; + goto pinctrl_bind_failed; if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", @@ -322,6 +383,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto probe_failed; } + pinctrl_init_done(dev); + if (dev->pm_domain && dev->pm_domain->sync) dev->pm_domain->sync(dev); @@ -332,12 +395,17 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto done; probe_failed: + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DRIVER_NOT_BOUND, dev); +pinctrl_bind_failed: devres_release_all(dev); driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); switch (ret) { case -EPROBE_DEFER: @@ -391,6 +459,10 @@ int driver_probe_done(void) */ void wait_for_device_probe(void) { + /* wait for the deferred probe workqueue to finish */ + if (driver_deferred_probe_enable) + flush_workqueue(deferred_wq); + /* wait for the known devices to complete their probing */ wait_event(probe_waitqueue, atomic_read(&probe_count) == 0); async_synchronize_full(); @@ -488,6 +560,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) struct device_attach_data *data = _data; struct device *dev = data->dev; bool async_allowed; + int ret; /* * Check if device has already been claimed. 
This may @@ -498,8 +571,17 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) if (dev->driver) return -EBUSY; - if (!driver_match_device(drv, dev)) + ret = driver_match_device(drv, dev); + if (ret == 0) { + /* no match */ return 0; + } else if (ret == -EPROBE_DEFER) { + dev_dbg(dev, "Device match requests probe deferral\n"); + driver_deferred_probe_add(dev); + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d", ret); + return ret; + } /* ret > 0 means positive match */ async_allowed = driver_allows_async_probing(drv); @@ -545,7 +627,7 @@ static int __device_attach(struct device *dev, bool allow_async) device_lock(dev); if (dev->driver) { - if (klist_node_attached(&dev->p->knode_driver)) { + if (device_is_bound(dev)) { ret = 1; goto out_unlock; } @@ -619,6 +701,7 @@ void device_initial_probe(struct device *dev) static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; + int ret; /* * Lock device and try to bind to it. We drop the error @@ -630,8 +713,17 @@ static int __driver_attach(struct device *dev, void *data) * is an error. */ - if (!driver_match_device(drv, dev)) + ret = driver_match_device(drv, dev); + if (ret == 0) { + /* no match */ return 0; + } else if (ret == -EPROBE_DEFER) { + dev_dbg(dev, "Device match requests probe deferral\n"); + driver_deferred_probe_add(dev); + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d", ret); + return ret; + } /* ret > 0 means positive match */ if (dev->parent) /* Needed for USB */ device_lock(dev->parent); @@ -693,13 +785,14 @@ static void __device_release_driver(struct device *dev) dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); klist_remove(&dev->p->knode_driver); + device_pm_check_callbacks(dev); if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBOUND_DRIVER, dev); - } } diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 875464690117..8fc654f0807b 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -82,12 +82,12 @@ static struct devres_group * node_to_group(struct devres_node *node) } static __always_inline struct devres * alloc_dr(dr_release_t release, - size_t size, gfp_t gfp) + size_t size, gfp_t gfp, int nid) { size_t tot_size = sizeof(struct devres) + size; struct devres *dr; - dr = kmalloc_track_caller(tot_size, gfp); + dr = kmalloc_node_track_caller(tot_size, gfp, nid); if (unlikely(!dr)) return NULL; @@ -106,24 +106,25 @@ static void add_dr(struct device *dev, struct devres_node *node) } #ifdef CONFIG_DEBUG_DEVRES -void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, +void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name) { struct devres *dr; - dr = alloc_dr(release, size, gfp | __GFP_ZERO); + dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid); if (unlikely(!dr)) return NULL; set_node_dbginfo(&dr->node, name, size); return dr->data; } -EXPORT_SYMBOL_GPL(__devres_alloc); +EXPORT_SYMBOL_GPL(__devres_alloc_node); #else /** * devres_alloc - Allocate device resource data * @release: Release function devres will be associated with * @size: Allocation size * @gfp: Allocation flags + * @nid: NUMA node * * Allocate devres of @size bytes. The allocated area is zeroed, then * associated with @release. 
The returned pointer can be passed to @@ -132,16 +133,16 @@ EXPORT_SYMBOL_GPL(__devres_alloc); * RETURNS: * Pointer to allocated devres on success, NULL on failure. */ -void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) +void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid) { struct devres *dr; - dr = alloc_dr(release, size, gfp | __GFP_ZERO); + dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid); if (unlikely(!dr)) return NULL; return dr->data; } -EXPORT_SYMBOL_GPL(devres_alloc); +EXPORT_SYMBOL_GPL(devres_alloc_node); #endif /** @@ -776,7 +777,7 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) struct devres *dr; /* use raw alloc_dr for kmalloc caller tracing */ - dr = alloc_dr(devm_kmalloc_release, size, gfp); + dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev)); if (unlikely(!dr)) return NULL; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 68f03141e432..44a74cf1372c 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -215,9 +215,9 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid, newattrs.ia_uid = uid; newattrs.ia_gid = gid; newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID; - mutex_lock(&d_inode(dentry)->i_mutex); + inode_lock(d_inode(dentry)); notify_change(dentry, &newattrs, NULL); - mutex_unlock(&d_inode(dentry)->i_mutex); + inode_unlock(d_inode(dentry)); /* mark as kernel-created inode */ d_inode(dentry)->i_private = &thread; @@ -244,7 +244,7 @@ static int dev_rmdir(const char *name) err = -ENOENT; } dput(dentry); - mutex_unlock(&d_inode(parent.dentry)->i_mutex); + inode_unlock(d_inode(parent.dentry)); path_put(&parent); return err; } @@ -321,9 +321,9 @@ static int handle_remove(const char *nodename, struct device *dev) newattrs.ia_mode = stat.mode & ~0777; newattrs.ia_valid = ATTR_UID|ATTR_GID|ATTR_MODE; - mutex_lock(&d_inode(dentry)->i_mutex); + inode_lock(d_inode(dentry)); notify_change(dentry, &newattrs, NULL); - mutex_unlock(&d_inode(dentry)->i_mutex); + inode_unlock(d_inode(dentry)); err = vfs_unlink(d_inode(parent.dentry), dentry, NULL); if (!err || err == -ENOENT) deleted = 1; @@ -332,7 +332,7 @@ static int handle_remove(const char *nodename, struct device *dev) err = -ENOENT; } dput(dentry); - mutex_unlock(&d_inode(parent.dentry)->i_mutex); + inode_unlock(d_inode(parent.dentry)); path_put(&parent); if (deleted && strchr(nodename, '/')) diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index a12ff9863d7e..e167a1e1bccb 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -46,7 +46,7 @@ struct cma *dma_contiguous_default_area; * Users, who want to set the size of global CMA area for their system * should use cma= kernel parameter. 
*/ -static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M; +static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M; static phys_addr_t size_cmdline = -1; static phys_addr_t base_cmdline; static phys_addr_t limit_cmdline; diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index d95c5971c225..d799662f19eb 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -12,7 +12,6 @@ #include <linux/gfp.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <asm-generic/dma-coherent.h> /* * Managed DMA API @@ -167,7 +166,7 @@ void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, } EXPORT_SYMBOL(dmam_free_noncoherent); -#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT static void dmam_coherent_decl_release(struct device *dev, void *res) { @@ -247,7 +246,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { int ret = -ENXIO; -#ifdef CONFIG_MMU +#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP) unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); @@ -264,7 +263,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, user_count << PAGE_SHIFT, vma->vm_page_prot); } -#endif /* CONFIG_MMU */ +#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ return ret; } diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 8524450e75bd..b9250e564ebf 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -1118,15 +1118,17 @@ static int _request_firmware(const struct firmware **firmware_p, const char *name, struct device *device, unsigned int opt_flags) { - struct firmware *fw; + struct firmware *fw = NULL; long timeout; int ret; if (!firmware_p) return -EINVAL; - if (!name || name[0] == '\0') - return -EINVAL; + if (!name || name[0] == '\0') { + ret = -EINVAL; + goto out; + } ret = _request_firmware_prepare(&fw, name, device); if (ret <= 0) /* error or already assigned */ diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 2804aed3f416..213456c2b123 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev) if (mem->state == MEM_OFFLINE) return 0; + /* Can't offline block with non-present sections */ + if (mem->section_count != sections_per_block) + return -EINVAL; + return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); } @@ -446,8 +450,7 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u64 phys_addr; - int nid; - int i, ret; + int nid, ret; unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block; ret = kstrtoull(buf, 0, &phys_addr); @@ -457,15 +460,12 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) return -EINVAL; - for (i = 0; i < sections_per_block; i++) { - nid = memory_add_physaddr_to_nid(phys_addr); - ret = add_memory(nid, phys_addr, - PAGES_PER_SECTION << PAGE_SHIFT); - if (ret) - goto out; + nid = memory_add_physaddr_to_nid(phys_addr); + ret = add_memory(nid, phys_addr, + MIN_MEMORY_BLOCK_SIZE * sections_per_block); - phys_addr += MIN_MEMORY_BLOCK_SIZE; - } + if (ret) + goto out; ret = count; out: @@ -614,7 +614,6 @@ static int 
init_memory_block(struct memory_block **memory, base_memory_block_id(scn_nr) * sections_per_block; mem->end_section_nr = mem->start_section_nr + sections_per_block - 1; mem->state = state; - mem->section_count++; start_pfn = section_nr_to_pfn(mem->start_section_nr); mem->phys_device = arch_get_memory_phys_device(start_pfn); @@ -648,6 +647,13 @@ static int add_memory_block(int base_section_nr) return 0; } +static bool is_zone_device_section(struct mem_section *ms) +{ + struct page *page; + + page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms)); + return is_zone_device_page(page); +} /* * need an interface for the VM to add new memory regions, @@ -658,6 +664,9 @@ int register_new_memory(int nid, struct mem_section *section) int ret = 0; struct memory_block *mem; + if (is_zone_device_section(section)) + return 0; + mutex_lock(&mem_sysfs_mutex); mem = find_memory_block(section); @@ -668,6 +677,7 @@ int register_new_memory(int nid, struct mem_section *section) ret = init_memory_block(&mem, section, MEM_OFFLINE); if (ret) goto out; + mem->section_count++; } if (mem->section_count == sections_per_block) @@ -688,11 +698,14 @@ unregister_memory(struct memory_block *memory) device_unregister(&memory->dev); } -static int remove_memory_block(unsigned long node_id, +static int remove_memory_section(unsigned long node_id, struct mem_section *section, int phys_device) { struct memory_block *mem; + if (is_zone_device_section(section)) + return 0; + mutex_lock(&mem_sysfs_mutex); mem = find_memory_block(section); unregister_mem_sect_under_nodes(mem, __section_nr(section)); @@ -712,7 +725,7 @@ int unregister_memory_section(struct mem_section *section) if (!present_section(section)) return -EINVAL; - return remove_memory_block(0, section, 0); + return remove_memory_section(0, section, 0); } #endif /* CONFIG_MEMORY_HOTREMOVE */ diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c index 5fb74b43848e..076297592754 100644 --- a/drivers/base/pinctrl.c +++ b/drivers/base/pinctrl.c @@ -42,9 +42,20 @@ int pinctrl_bind_pins(struct device *dev) goto cleanup_get; } - ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state); + dev->pins->init_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_INIT); + if (IS_ERR(dev->pins->init_state)) { + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no init pinctrl state\n"); + + ret = pinctrl_select_state(dev->pins->p, + dev->pins->default_state); + } else { + ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state); + } + if (ret) { - dev_dbg(dev, "failed to activate default pinctrl state\n"); + dev_dbg(dev, "failed to activate initial pinctrl state\n"); goto cleanup_get; } diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 134483daac25..279e53989374 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -24,13 +24,17 @@ #include <linux/msi.h> #include <linux/slab.h> -#define DEV_ID_SHIFT 24 +#define DEV_ID_SHIFT 21 +#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) /* * Internal data structure containing a (made up, but unique) devid * and the callback to write the MSI message. 
*/ struct platform_msi_priv_data { + struct device *dev; + void *host_data; + msi_alloc_info_t arg; irq_write_msi_msg_t write_msg; int devid; }; @@ -110,39 +114,49 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info) chip->irq_write_msi_msg = platform_msi_write_msg; } -static void platform_msi_free_descs(struct device *dev) +static void platform_msi_free_descs(struct device *dev, int base, int nvec) { struct msi_desc *desc, *tmp; list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { - list_del(&desc->list); - free_msi_entry(desc); + if (desc->platform.msi_index >= base && + desc->platform.msi_index < (base + nvec)) { + list_del(&desc->list); + free_msi_entry(desc); + } } } -static int platform_msi_alloc_descs(struct device *dev, int nvec, - struct platform_msi_priv_data *data) +static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq, + int nvec, + struct platform_msi_priv_data *data) { - int i; + struct msi_desc *desc; + int i, base = 0; - for (i = 0; i < nvec; i++) { - struct msi_desc *desc; + if (!list_empty(dev_to_msi_list(dev))) { + desc = list_last_entry(dev_to_msi_list(dev), + struct msi_desc, list); + base = desc->platform.msi_index + 1; + } + for (i = 0; i < nvec; i++) { desc = alloc_msi_entry(dev); if (!desc) break; desc->platform.msi_priv_data = data; - desc->platform.msi_index = i; + desc->platform.msi_index = base + i; desc->nvec_used = 1; + desc->irq = virq ? virq + i : 0; list_add_tail(&desc->list, dev_to_msi_list(dev)); } if (i != nvec) { /* Clean up the mess */ - platform_msi_free_descs(dev); + platform_msi_free_descs(dev, base, nvec); return -ENOMEM; } @@ -150,9 +164,16 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec, return 0; } +static int platform_msi_alloc_descs(struct device *dev, int nvec, + struct platform_msi_priv_data *data) + +{ + return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data); +} + /** * platform_msi_create_irq_domain - Create a platform MSI interrupt domain - * @np: Optional device-tree node of the interrupt controller + * @fwnode: Optional fwnode of the interrupt controller * @info: MSI domain info * @parent: Parent irq domain * @@ -162,7 +183,7 @@ static int platform_msi_alloc_descs(struct device *dev, int nvec, * Returns: * A domain pointer or NULL in case of failure. 
*/ -struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, +struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent) { @@ -173,63 +194,82 @@ struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) platform_msi_update_chip_ops(info); - domain = msi_create_irq_domain(np, info, parent); + domain = msi_create_irq_domain(fwnode, info, parent); if (domain) domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; return domain; } -/** - * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev - * @dev: The device for which to allocate interrupts - * @nvec: The number of interrupts to allocate - * @write_msi_msg: Callback to write an interrupt message for @dev - * - * Returns: - * Zero for success, or an error code in case of failure - */ -int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, - irq_write_msi_msg_t write_msi_msg) +static struct platform_msi_priv_data * +platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) { - struct platform_msi_priv_data *priv_data; - int err; - + struct platform_msi_priv_data *datap; /* * Limit the number of interrupts to 256 per device. Should we * need to bump this up, DEV_ID_SHIFT should be adjusted * accordingly (which would impact the max number of MSI * capable devices). */ - if (!dev->msi_domain || !write_msi_msg || !nvec || - nvec > (1 << (32 - DEV_ID_SHIFT))) - return -EINVAL; + if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS) + return ERR_PTR(-EINVAL); if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { dev_err(dev, "Incompatible msi_domain, giving up\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } /* Already had a helping of MSI? Greed... 
*/ if (!list_empty(dev_to_msi_list(dev))) - return -EBUSY; + return ERR_PTR(-EBUSY); + + datap = kzalloc(sizeof(*datap), GFP_KERNEL); + if (!datap) + return ERR_PTR(-ENOMEM); + + datap->devid = ida_simple_get(&platform_msi_devid_ida, + 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); + if (datap->devid < 0) { + int err = datap->devid; + kfree(datap); + return ERR_PTR(err); + } - priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); - if (!priv_data) - return -ENOMEM; + datap->write_msg = write_msi_msg; + datap->dev = dev; - priv_data->devid = ida_simple_get(&platform_msi_devid_ida, - 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); - if (priv_data->devid < 0) { - err = priv_data->devid; - goto out_free_data; - } + return datap; +} + +static void platform_msi_free_priv_data(struct platform_msi_priv_data *data) +{ + ida_simple_remove(&platform_msi_devid_ida, data->devid); + kfree(data); +} + +/** + * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + */ +int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct platform_msi_priv_data *priv_data; + int err; - priv_data->write_msg = write_msi_msg; + priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (IS_ERR(priv_data)) + return PTR_ERR(priv_data); err = platform_msi_alloc_descs(dev, nvec, priv_data); if (err) - goto out_free_id; + goto out_free_priv_data; err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); if (err) @@ -238,14 +278,13 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, return 0; out_free_desc: - platform_msi_free_descs(dev); -out_free_id: - ida_simple_remove(&platform_msi_devid_ida, priv_data->devid); -out_free_data: - kfree(priv_data); + platform_msi_free_descs(dev, 0, nvec); +out_free_priv_data: + platform_msi_free_priv_data(priv_data); return err; } +EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); /** * platform_msi_domain_free_irqs - Free MSI interrupts for @dev @@ -253,18 +292,127 @@ out_free_data: */ void platform_msi_domain_free_irqs(struct device *dev) { - struct msi_desc *desc; + if (!list_empty(dev_to_msi_list(dev))) { + struct msi_desc *desc; + + desc = first_msi_entry(dev); + platform_msi_free_priv_data(desc->platform.msi_priv_data); + } + + msi_domain_free_irqs(dev->msi_domain, dev); + platform_msi_free_descs(dev, 0, MAX_DEV_MSIS); +} +EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); + +/** + * platform_msi_get_host_data - Query the private data associated with + * a platform-msi domain + * @domain: The platform-msi domain + * + * Returns the private data provided when calling + * platform_msi_create_device_domain. 
+ */ +void *platform_msi_get_host_data(struct irq_domain *domain) +{ + struct platform_msi_priv_data *data = domain->host_data; + return data->host_data; +} + +/** + * platform_msi_create_device_domain - Create a platform-msi domain + * + * @dev: The device generating the MSIs + * @nvec: The number of MSIs that need to be allocated + * @write_msi_msg: Callback to write an interrupt message for @dev + * @ops: The hierarchy domain operations to use + * @host_data: Private data associated to this domain + * + * Returns an irqdomain for @nvec interrupts + */ +struct irq_domain * +platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct platform_msi_priv_data *data; + struct irq_domain *domain; + int err; + + data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (IS_ERR(data)) + return NULL; + + data->host_data = host_data; + domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec, + of_node_to_fwnode(dev->of_node), + ops, data); + if (!domain) + goto free_priv; - desc = first_msi_entry(dev); - if (desc) { - struct platform_msi_priv_data *data; + err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg); + if (err) + goto free_domain; + + return domain; - data = desc->platform.msi_priv_data; +free_domain: + irq_domain_remove(domain); +free_priv: + platform_msi_free_priv_data(data); + return NULL; +} + +/** + * platform_msi_domain_free - Free interrupts associated with a platform-msi + * domain + * + * @domain: The platform-msi domain + * @virq: The base irq from which to perform the free operation + * @nvec: How many interrupts to free from @virq + */ +void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nvec) +{ + struct platform_msi_priv_data *data = domain->host_data; + struct msi_desc *desc; + for_each_msi_entry(desc, data->dev) { + if (WARN_ON(!desc->irq || desc->nvec_used != 1)) + return; + if (!(desc->irq >= virq && desc->irq < (virq + nvec))) + continue; - ida_simple_remove(&platform_msi_devid_ida, data->devid); - kfree(data); + irq_domain_free_irqs_common(domain, desc->irq, 1); } +} - msi_domain_free_irqs(dev->msi_domain, dev); - platform_msi_free_descs(dev); +/** + * platform_msi_domain_alloc - Allocate interrupts associated with + * a platform-msi domain + * + * @domain: The platform-msi domain + * @virq: The base irq from which to perform the allocate operation + * @nvec: How many interrupts to free from @virq + * + * Return 0 on success, or an error code on failure. Must be called + * with irq_domain_mutex held (which can only be done as part of a + * top-level interrupt allocation). 
+ */ +int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct platform_msi_priv_data *data = domain->host_data; + int err; + + err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data); + if (err) + return err; + + err = msi_domain_populate_irqs(domain->parent, data->dev, + virq, nr_irqs, &data->arg); + if (err) + platform_msi_domain_free(domain, virq, nr_irqs); + + return err; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index f80aaaf9f610..f437afa17f2b 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -26,6 +26,7 @@ #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/limits.h> +#include <linux/property.h> #include "base.h" #include "power/power.h" @@ -117,6 +118,26 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) EXPORT_SYMBOL_GPL(platform_get_irq); /** + * platform_irq_count - Count the number of IRQs a platform device uses + * @dev: platform device + * + * Return: Number of IRQs a platform device uses or EPROBE_DEFER + */ +int platform_irq_count(struct platform_device *dev) +{ + int ret, nr = 0; + + while ((ret = platform_get_irq(dev, nr)) >= 0) + nr++; + + if (ret == -EPROBE_DEFER) + return ret; + + return nr; +} +EXPORT_SYMBOL_GPL(platform_irq_count); + +/** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type @@ -299,6 +320,22 @@ int platform_device_add_data(struct platform_device *pdev, const void *data, EXPORT_SYMBOL_GPL(platform_device_add_data); /** + * platform_device_add_properties - add built-in properties to a platform device + * @pdev: platform device to add properties to + * @pset: properties to add + * + * The function will take deep copy of the properties in @pset and attach + * the copy to the platform device. The memory associated with properties + * will be freed when the platform device is released. 
+ */ +int platform_device_add_properties(struct platform_device *pdev, + const struct property_set *pset) +{ + return device_add_property_set(&pdev->dev, pset); +} +EXPORT_SYMBOL_GPL(platform_device_add_properties); + +/** * platform_device_add - add a platform device to device hierarchy * @pdev: platform device we're adding * @@ -409,6 +446,8 @@ void platform_device_del(struct platform_device *pdev) if (r->parent) release_resource(r); } + + device_remove_property_set(&pdev->dev); } } EXPORT_SYMBOL_GPL(platform_device_del); @@ -487,6 +526,12 @@ struct platform_device *platform_device_register_full( if (ret) goto err; + if (pdevinfo->pset) { + ret = platform_device_add_properties(pdev, pdevinfo->pset); + if (ret) + goto err; + } + ret = platform_device_add(pdev); if (ret) { err: @@ -514,9 +559,14 @@ static int platform_drv_probe(struct device *_dev) ret = dev_pm_domain_attach(_dev, true); if (ret != -EPROBE_DEFER) { - ret = drv->probe(dev); - if (ret) - dev_pm_domain_detach(_dev, true); + if (drv->probe) { + ret = drv->probe(dev); + if (ret) + dev_pm_domain_detach(_dev, true); + } else { + /* don't fail if just dev_pm_domain_attach failed */ + ret = 0; + } } if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { @@ -536,9 +586,10 @@ static int platform_drv_remove(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); - int ret; + int ret = 0; - ret = drv->remove(dev); + if (drv->remove) + ret = drv->remove(dev); dev_pm_domain_detach(_dev, true); return ret; @@ -549,8 +600,8 @@ static void platform_drv_shutdown(struct device *_dev) struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); - drv->shutdown(dev); - dev_pm_domain_detach(_dev, true); + if (drv->shutdown) + drv->shutdown(dev); } /** @@ -563,12 +614,9 @@ int __platform_driver_register(struct platform_driver *drv, { drv->driver.owner = owner; drv->driver.bus = &platform_bus_type; - if (drv->probe) - drv->driver.probe = platform_drv_probe; - if (drv->remove) - drv->driver.remove = platform_drv_remove; - if (drv->shutdown) - drv->driver.shutdown = platform_drv_shutdown; + drv->driver.probe = platform_drv_probe; + drv->driver.remove = platform_drv_remove; + drv->driver.shutdown = platform_drv_shutdown; return driver_register(&drv->driver); } @@ -711,6 +759,67 @@ err_out: } EXPORT_SYMBOL_GPL(__platform_create_bundle); +/** + * __platform_register_drivers - register an array of platform drivers + * @drivers: an array of drivers to register + * @count: the number of drivers to register + * @owner: module owning the drivers + * + * Registers platform drivers specified by an array. On failure to register a + * driver, all previously registered drivers will be unregistered. Callers of + * this API should use platform_unregister_drivers() to unregister drivers in + * the reverse order. + * + * Returns: 0 on success or a negative error code on failure. 
+ */ +int __platform_register_drivers(struct platform_driver * const *drivers, + unsigned int count, struct module *owner) +{ + unsigned int i; + int err; + + for (i = 0; i < count; i++) { + pr_debug("registering platform driver %ps\n", drivers[i]); + + err = __platform_driver_register(drivers[i], owner); + if (err < 0) { + pr_err("failed to register platform driver %ps: %d\n", + drivers[i], err); + goto error; + } + } + + return 0; + +error: + while (i--) { + pr_debug("unregistering platform driver %ps\n", drivers[i]); + platform_driver_unregister(drivers[i]); + } + + return err; +} +EXPORT_SYMBOL_GPL(__platform_register_drivers); + +/** + * platform_unregister_drivers - unregister an array of platform drivers + * @drivers: an array of drivers to unregister + * @count: the number of drivers to unregister + * + * Unegisters platform drivers specified by an array. This is typically used + * to complement an earlier call to platform_register_drivers(). Drivers are + * unregistered in the reverse order in which they were registered. + */ +void platform_unregister_drivers(struct platform_driver * const *drivers, + unsigned int count) +{ + while (count--) { + pr_debug("unregistering platform driver %ps\n", drivers[count]); + platform_driver_unregister(drivers[count]); + } +} +EXPORT_SYMBOL_GPL(platform_unregister_drivers); + /* modalias support enables more hands-off userspace setup: * (a) environment variable lets new-style hotplug events work once system is * fully running: "modprobe $MODALIAS" diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index f94a6ccfe787..5998c53280f5 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o -obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 652b5a367c1f..272a52ebafc0 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -15,9 +15,10 @@ #include <linux/clkdev.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/pm_domain.h> #include <linux/pm_runtime.h> -#ifdef CONFIG_PM +#ifdef CONFIG_PM_CLK enum pce_status { PCE_STATUS_NONE = 0, @@ -93,7 +94,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id, return -ENOMEM; } } else { - if (IS_ERR(clk) || !__clk_get(clk)) { + if (IS_ERR(clk)) { kfree(ce); return -ENOENT; } @@ -127,7 +128,9 @@ int pm_clk_add(struct device *dev, const char *con_id) * @clk: Clock pointer * * Add the clock to the list of clocks used for the power management of @dev. - * It will increment refcount on clock pointer, use clk_put() on it when done. + * The power-management code will take control of the clock reference, so + * callers should not call clk_put() on @clk after this function sucessfully + * returned. 
*/ int pm_clk_add_clk(struct device *dev, struct clk *clk) { @@ -346,7 +349,7 @@ static int pm_clk_notify(struct notifier_block *nb, if (error) break; - dev->pm_domain = clknb->pm_domain; + dev_pm_domain_set(dev, clknb->pm_domain); if (clknb->con_ids[0]) { for (con_id = clknb->con_ids; *con_id; con_id++) pm_clk_add(dev, *con_id); @@ -359,7 +362,7 @@ static int pm_clk_notify(struct notifier_block *nb, if (dev->pm_domain != clknb->pm_domain) break; - dev->pm_domain = NULL; + dev_pm_domain_set(dev, NULL); pm_clk_destroy(dev); break; } @@ -404,7 +407,7 @@ int pm_clk_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -#else /* !CONFIG_PM */ +#else /* !CONFIG_PM_CLK */ /** * enable_clock - Enable a device clock. @@ -471,6 +474,7 @@ static int pm_clk_notify(struct notifier_block *nb, enable_clock(dev, NULL); } break; + case BUS_NOTIFY_DRIVER_NOT_BOUND: case BUS_NOTIFY_UNBOUND_DRIVER: if (clknb->con_ids[0]) { for (con_id = clknb->con_ids; *con_id; con_id++) @@ -484,7 +488,7 @@ static int pm_clk_notify(struct notifier_block *nb, return 0; } -#endif /* !CONFIG_PM */ +#endif /* !CONFIG_PM_CLK */ /** * pm_clk_add_notifier - Add bus type notifier for power management clocks. diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index f32b802b98f4..f6a9ad52cbbf 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -14,6 +14,8 @@ #include <linux/acpi.h> #include <linux/pm_domain.h> +#include "power.h" + /** * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. * @dev: Device to handle. @@ -112,7 +114,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach); /** * dev_pm_domain_detach - Detach a device from its PM domain. - * @dev: Device to attach. + * @dev: Device to detach. * @power_off: Used to indicate whether we should power off the device. * * This functions will reverse the actions from dev_pm_domain_attach() and thus @@ -128,3 +130,25 @@ void dev_pm_domain_detach(struct device *dev, bool power_off) dev->pm_domain->detach(dev, power_off); } EXPORT_SYMBOL_GPL(dev_pm_domain_detach); + +/** + * dev_pm_domain_set - Set PM domain of a device. + * @dev: Device whose PM domain is to be set. + * @pd: PM domain to be set, or NULL. + * + * Sets the PM domain the device belongs to. The PM domain of a device needs + * to be set before its probe finishes (it's bound to a driver). + * + * This function must be called with the device lock held. 
+ */ +void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) +{ + if (dev->pm_domain == pd) + return; + + WARN(pd && device_is_bound(dev), + "PM domains can only be changed for unbound devices\n"); + dev->pm_domain = pd; + device_pm_check_callbacks(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_set); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 16550c63d611..301b785f9f56 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -20,6 +20,8 @@ #include <linux/suspend.h> #include <linux/export.h> +#include "power.h" + #define GENPD_RETRY_MAX_MS 250 /* Approximate */ #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ @@ -34,43 +36,9 @@ __ret; \ }) -#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ -({ \ - ktime_t __start = ktime_get(); \ - type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ - s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ - struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ - if (!__retval && __elapsed > __td->field) { \ - __td->field = __elapsed; \ - dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ - __elapsed); \ - genpd->max_off_time_changed = true; \ - __td->constraint_changed = true; \ - } \ - __retval; \ -}) - static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); -static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) -{ - struct generic_pm_domain *genpd = NULL, *gpd; - - if (IS_ERR_OR_NULL(domain_name)) - return NULL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!strcmp(gpd->name, domain_name)) { - genpd = gpd; - break; - } - } - mutex_unlock(&gpd_list_lock); - return genpd; -} - /* * Get the generic PM domain for a particular struct device. 
* This validates the struct device pointer, the PM domain pointer, @@ -110,18 +78,12 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, - stop_latency_ns, "stop"); + return GENPD_DEV_CALLBACK(genpd, int, stop, dev); } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, - bool timed) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) { - if (!timed) - return GENPD_DEV_CALLBACK(genpd, int, start, dev); - - return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, - start_latency_ns, "start"); + return GENPD_DEV_CALLBACK(genpd, int, start, dev); } static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) @@ -140,19 +102,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) -{ - s64 usecs64; - - if (!genpd->cpuidle_data) - return; - - usecs64 = genpd->power_on_latency_ns; - do_div(usecs64, NSEC_PER_USEC); - usecs64 += genpd->cpuidle_data->saved_exit_latency; - genpd->cpuidle_data->idle_state->exit_latency = usecs64; -} - static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) { ktime_t time_start; @@ -176,7 +125,6 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) genpd->power_on_latency_ns = elapsed_ns; genpd->max_off_time_changed = true; - genpd_recalc_cpu_exit_latency(genpd); pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", genpd->name, "on", elapsed_ns); @@ -213,10 +161,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) } /** - * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). - * @genpd: PM domait to power off. + * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). + * @genpd: PM domain to power off. * - * Queue up the execution of pm_genpd_poweroff() unless it's already been done + * Queue up the execution of genpd_poweroff() unless it's already been done * before. */ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) @@ -225,13 +173,14 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) } /** - * __pm_genpd_poweron - Restore power to a given PM domain and its masters. + * genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. + * @depth: nesting count for lockdep. * * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. */ -static int __pm_genpd_poweron(struct generic_pm_domain *genpd) +static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) { struct gpd_link *link; int ret = 0; @@ -240,24 +189,22 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->cpuidle_data) { - cpuidle_pause_and_lock(); - genpd->cpuidle_data->idle_state->disabled = true; - cpuidle_resume_and_unlock(); - goto out; - } - /* * The list is guaranteed not to change while the loop below is being * executed, unless one of the masters' .power_on() callbacks fiddles * with it. 
*/ list_for_each_entry(link, &genpd->slave_links, slave_node) { - genpd_sd_counter_inc(link->master); + struct generic_pm_domain *master = link->master; + + genpd_sd_counter_inc(master); + + mutex_lock_nested(&master->lock, depth + 1); + ret = genpd_poweron(master, depth + 1); + mutex_unlock(&master->lock); - ret = pm_genpd_poweron(link->master); if (ret) { - genpd_sd_counter_dec(link->master); + genpd_sd_counter_dec(master); goto err; } } @@ -266,7 +213,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) if (ret) goto err; - out: genpd->status = GPD_STATE_ACTIVE; return 0; @@ -281,47 +227,15 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) return ret; } -/** - * pm_genpd_poweron - Restore power to a given PM domain and its masters. - * @genpd: PM domain to power up. - */ -int pm_genpd_poweron(struct generic_pm_domain *genpd) -{ - int ret; - - mutex_lock(&genpd->lock); - ret = __pm_genpd_poweron(genpd); - mutex_unlock(&genpd->lock); - return ret; -} - -/** - * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. - * @domain_name: Name of the PM domain to power up. - */ -int pm_genpd_name_poweron(const char *domain_name) -{ - struct generic_pm_domain *genpd; - - genpd = pm_genpd_lookup_name(domain_name); - return genpd ? pm_genpd_poweron(genpd) : -EINVAL; -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { - return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, - save_state_latency_ns, "state save"); + return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); } static int genpd_restore_dev(struct generic_pm_domain *genpd, - struct device *dev, bool timed) + struct device *dev) { - if (!timed) - return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); - - return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, - restore_state_latency_ns, - "state restore"); + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); } static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, @@ -365,13 +279,14 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, } /** - * pm_genpd_poweroff - Remove power from a given PM domain. + * genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. + * @is_async: PM domain is powered down from a scheduled work * * If all of the @genpd's devices have been suspended and all of its subdomains * have been powered down, remove power from @genpd. */ -static int pm_genpd_poweroff(struct generic_pm_domain *genpd) +static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async) { struct pm_domain_data *pdd; struct gpd_link *link; @@ -398,12 +313,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (stat > PM_QOS_FLAGS_NONE) return -EBUSY; - if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe)) + if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe) not_suspended++; } - if (not_suspended > genpd->in_progress) + if (not_suspended > 1 || (not_suspended == 1 && is_async)) return -EBUSY; if (genpd->gov && genpd->gov->power_down_ok) { @@ -411,21 +325,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) return -EAGAIN; } - if (genpd->cpuidle_data) { - /* - * If cpuidle_data is set, cpuidle should turn the domain off - * when the CPU in it is idle. In that case we don't decrement - * the subdomain counts of the master domains, so that power is - * not removed from the current domain prematurely as a result - * of cutting off the masters' power. 
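The locking rework above is the core of this hunk: genpd_poweron() now takes each master's mutex itself, using mutex_lock_nested() with a per-level nesting count so lockdep can tell the recursively acquired domain locks apart while walking up the master hierarchy. Callers are expected to hold the domain's own lock and start at depth 0 — a minimal sketch of the calling convention, mirroring what genpd_dev_pm_attach() does later in this patch:

	mutex_lock(&genpd->lock);
	ret = genpd_poweron(genpd, 0);	/* masters are locked at depth + 1 */
	mutex_unlock(&genpd->lock);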
- */ - genpd->status = GPD_STATE_POWER_OFF; - cpuidle_pause_and_lock(); - genpd->cpuidle_data->idle_state->disabled = false; - cpuidle_resume_and_unlock(); - return 0; - } - if (genpd->power_off) { int ret; @@ -434,10 +333,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) /* * If sd_count > 0 at this point, one of the subdomains hasn't - * managed to call pm_genpd_poweron() for the master yet after - * incrementing it. In that case pm_genpd_poweron() will wait + * managed to call genpd_poweron() for the master yet after + * incrementing it. In that case genpd_poweron() will wait * for us to drop the lock, so we can call .power_off() and let - * the pm_genpd_poweron() restore power for us (this shouldn't + * the genpd_poweron() restore power for us (this shouldn't * happen very often). */ ret = genpd_power_off(genpd, true); @@ -466,7 +365,7 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); mutex_lock(&genpd->lock); - pm_genpd_poweroff(genpd); + genpd_poweroff(genpd, true); mutex_unlock(&genpd->lock); } @@ -482,6 +381,10 @@ static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; bool (*stop_ok)(struct device *__dev); + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; int ret; dev_dbg(dev, "%s()\n", __func__); @@ -490,20 +393,42 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; + /* + * A runtime PM centric subsystem/driver may re-use the runtime PM + * callbacks for other purposes than runtime PM. In those scenarios + * runtime PM is disabled. Under these circumstances, we shall skip + * validating/measuring the PM QoS latency. + */ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; - if (stop_ok && !stop_ok(dev)) + if (runtime_pm && stop_ok && !stop_ok(dev)) return -EBUSY; + /* Measure suspend latency. */ + if (runtime_pm) + time_start = ktime_get(); + ret = genpd_save_dev(genpd, dev); if (ret) return ret; ret = genpd_stop_dev(genpd, dev); if (ret) { - genpd_restore_dev(genpd, dev, true); + genpd_restore_dev(genpd, dev); return ret; } + /* Update suspend latency value if the measured time exceeds it. */ + if (runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->suspend_latency_ns) { + td->suspend_latency_ns = elapsed_ns; + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. 
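With GENPD_DEV_TIMED_CALLBACK gone, latency accounting moves into the runtime PM handlers themselves and is skipped entirely when runtime PM is disabled for the device. The suspend-side pattern, condensed from the hunk above (the resume path below is symmetrical and updates resume_latency_ns instead); td and genpd are the variables already in scope there:

	ktime_t time_start = ktime_get();
	s64 elapsed_ns;

	/* ... genpd_save_dev() and genpd_stop_dev() run here ... */

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns > td->suspend_latency_ns) {
		td->suspend_latency_ns = elapsed_ns;
		genpd->max_off_time_changed = true;
		td->constraint_changed = true;
	}

These two fields are exactly what the governor consumes: the domain_governor.c hunk further down subtracts suspend_latency_ns + resume_latency_ns from the device's QoS constraint in place of the four old per-callback latencies.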
@@ -512,9 +437,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - genpd->in_progress++; - pm_genpd_poweroff(genpd); - genpd->in_progress--; + genpd_poweroff(genpd, false); mutex_unlock(&genpd->lock); return 0; @@ -531,6 +454,10 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; int ret; bool timed = true; @@ -547,15 +474,31 @@ static int pm_genpd_runtime_resume(struct device *dev) } mutex_lock(&genpd->lock); - ret = __pm_genpd_poweron(genpd); + ret = genpd_poweron(genpd, 0); mutex_unlock(&genpd->lock); if (ret) return ret; out: - genpd_start_dev(genpd, dev, timed); - genpd_restore_dev(genpd, dev, timed); + /* Measure resume latency. */ + if (timed && runtime_pm) + time_start = ktime_get(); + + genpd_start_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + + /* Update resume latency value if the measured time exceeds it. */ + if (timed && runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->resume_latency_ns) { + td->resume_latency_ns = elapsed_ns; + dev_dbg(dev, "resume latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } return 0; } @@ -569,15 +512,15 @@ static int __init pd_ignore_unused_setup(char *__unused) __setup("pd_ignore_unused", pd_ignore_unused_setup); /** - * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. + * genpd_poweroff_unused - Power off all PM domains with no devices in use. */ -void pm_genpd_poweroff_unused(void) +static int __init genpd_poweroff_unused(void) { struct generic_pm_domain *genpd; if (pd_ignore_unused) { pr_warn("genpd: Not disabling unused power domains\n"); - return; + return 0; } mutex_lock(&gpd_list_lock); @@ -586,11 +529,7 @@ void pm_genpd_poweroff_unused(void) genpd_queue_power_off_work(genpd); mutex_unlock(&gpd_list_lock); -} -static int __init genpd_poweroff_unused(void) -{ - pm_genpd_poweroff_unused(); return 0; } late_initcall(genpd_poweroff_unused); @@ -764,7 +703,7 @@ static int pm_genpd_prepare(struct device *dev) /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, - * so pm_genpd_poweron() will return immediately, but if the device + * so genpd_poweron() will return immediately, but if the device * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need * to make it operational. */ @@ -890,7 +829,7 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev, true); + return genpd_start_dev(genpd, dev); } /** @@ -1018,7 +957,8 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true); + return genpd->suspend_power_off ? 
+ 0 : genpd_start_dev(genpd, dev); } /** @@ -1112,7 +1052,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev, true); + return genpd_start_dev(genpd, dev); } /** @@ -1240,10 +1180,11 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, } dev->power.subsys_data->domain_data = &gpd_data->base; - dev->pm_domain = &genpd->domain; spin_unlock_irq(&dev->power.lock); + dev_pm_domain_set(dev, &genpd->domain); + return gpd_data; err_free: @@ -1257,9 +1198,10 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, static void genpd_free_dev_data(struct device *dev, struct generic_pm_domain_data *gpd_data) { + dev_pm_domain_set(dev, NULL); + spin_lock_irq(&dev->power.lock); - dev->pm_domain = NULL; dev->power.subsys_data->domain_data = NULL; spin_unlock_irq(&dev->power.lock); @@ -1315,18 +1257,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, return ret; } - -/** - * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. - * @domain_name: Name of the PM domain to add the device to. - * @dev: Device to be added. - * @td: Set of PM QoS timing parameters to attach to the device. - */ -int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, - struct gpd_timing_data *td) -{ - return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); -} +EXPORT_SYMBOL_GPL(__pm_genpd_add_device); /** * pm_genpd_remove_device - Remove a device from an I/O PM domain. @@ -1377,6 +1308,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, return ret; } +EXPORT_SYMBOL_GPL(pm_genpd_remove_device); /** * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. @@ -1386,15 +1318,19 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { - struct gpd_link *link; + struct gpd_link *link, *itr; int ret = 0; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) || genpd == subdomain) return -EINVAL; - mutex_lock(&genpd->lock); - mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + mutex_lock(&subdomain->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { @@ -1402,18 +1338,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(link, &genpd->master_links, master_node) { - if (link->slave == subdomain && link->master == genpd) { + list_for_each_entry(itr, &genpd->master_links, master_node) { + if (itr->slave == subdomain && itr->master == genpd) { ret = -EINVAL; goto out; } } - link = kzalloc(sizeof(*link), GFP_KERNEL); - if (!link) { - ret = -ENOMEM; - goto out; - } link->master = genpd; list_add_tail(&link->master_node, &genpd->master_links); link->slave = subdomain; @@ -1422,40 +1353,13 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, genpd_sd_counter_inc(genpd); out: - mutex_unlock(&subdomain->lock); mutex_unlock(&genpd->lock); - + mutex_unlock(&subdomain->lock); + if (ret) + kfree(link); return ret; } - -/** - * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. - * @master_name: Name of the master PM domain to add the subdomain to. - * @subdomain_name: Name of the subdomain to be added. 
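genpd_alloc_dev_data() and genpd_free_dev_data() now route the dev->pm_domain update through dev_pm_domain_set() from the top of this patch, which keeps the assignment outside the power.lock spinlock and picks up both the device_is_bound() warning and the device_pm_check_callbacks() refresh. A minimal sketch of a bus using the same helper for its own PM domain — the acme_* names are hypothetical:

	static int acme_runtime_suspend(struct device *dev) { return 0; }
	static int acme_runtime_resume(struct device *dev) { return 0; }

	static struct dev_pm_domain acme_pm_domain = {
		.ops = {
			SET_RUNTIME_PM_OPS(acme_runtime_suspend,
					   acme_runtime_resume, NULL)
		},
	};

	/* while the device is still unbound: */
	dev_pm_domain_set(dev, &acme_pm_domain);
	/* ... and on detach from the bus: */
	dev_pm_domain_set(dev, NULL);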
- */ -int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name) -{ - struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; - - if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) - return -EINVAL; - - mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - if (!master && !strcmp(gpd->name, master_name)) - master = gpd; - - if (!subdomain && !strcmp(gpd->name, subdomain_name)) - subdomain = gpd; - - if (master && subdomain) - break; - } - mutex_unlock(&gpd_list_lock); - - return pm_genpd_add_subdomain(master, subdomain); -} +EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); /** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. @@ -1471,7 +1375,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - mutex_lock(&genpd->lock); + mutex_lock(&subdomain->lock); + mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { pr_warn("%s: unable to remove subdomain %s\n", genpd->name, @@ -1484,143 +1389,23 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (link->slave != subdomain) continue; - mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - list_del(&link->master_node); list_del(&link->slave_node); kfree(link); if (subdomain->status != GPD_STATE_POWER_OFF) genpd_sd_counter_dec(genpd); - mutex_unlock(&subdomain->lock); - ret = 0; break; } out: mutex_unlock(&genpd->lock); + mutex_unlock(&subdomain->lock); return ret; } - -/** - * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. - * @genpd: PM domain to be connected with cpuidle. - * @state: cpuidle state this domain can disable/enable. - * - * Make a PM domain behave as though it contained a CPU core, that is, instead - * of calling its power down routine it will enable the given cpuidle state so - * that the cpuidle subsystem can power it down (if possible and desirable). - */ -int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) -{ - struct cpuidle_driver *cpuidle_drv; - struct gpd_cpuidle_data *cpuidle_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd) || state < 0) - return -EINVAL; - - mutex_lock(&genpd->lock); - - if (genpd->cpuidle_data) { - ret = -EEXIST; - goto out; - } - cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL); - if (!cpuidle_data) { - ret = -ENOMEM; - goto out; - } - cpuidle_drv = cpuidle_driver_ref(); - if (!cpuidle_drv) { - ret = -ENODEV; - goto err_drv; - } - if (cpuidle_drv->state_count <= state) { - ret = -EINVAL; - goto err; - } - idle_state = &cpuidle_drv->states[state]; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto err; - } - cpuidle_data->idle_state = idle_state; - cpuidle_data->saved_exit_latency = idle_state->exit_latency; - genpd->cpuidle_data = cpuidle_data; - genpd_recalc_cpu_exit_latency(genpd); - - out: - mutex_unlock(&genpd->lock); - return ret; - - err: - cpuidle_driver_unref(); - - err_drv: - kfree(cpuidle_data); - goto out; -} - -/** - * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. - * @name: Name of the domain to connect to cpuidle. - * @state: cpuidle state this domain can manipulate. - */ -int pm_genpd_name_attach_cpuidle(const char *name, int state) -{ - return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); -} - -/** - * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. 
- * @genpd: PM domain to remove the cpuidle connection from. - * - * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the - * given PM domain. - */ -int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - struct gpd_cpuidle_data *cpuidle_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd)) - return -EINVAL; - - mutex_lock(&genpd->lock); - - cpuidle_data = genpd->cpuidle_data; - if (!cpuidle_data) { - ret = -ENODEV; - goto out; - } - idle_state = cpuidle_data->idle_state; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto out; - } - idle_state->exit_latency = cpuidle_data->saved_exit_latency; - cpuidle_driver_unref(); - genpd->cpuidle_data = NULL; - kfree(cpuidle_data); - - out: - mutex_unlock(&genpd->lock); - return ret; -} - -/** - * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. - * @name: Name of the domain to disconnect cpuidle from. - */ -int pm_genpd_name_detach_cpuidle(const char *name) -{ - return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); -} +EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); /* Default device callbacks for generic PM domains. */ @@ -1688,7 +1473,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, mutex_init(&genpd->lock); genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); - genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; genpd->device_count = 0; @@ -1996,10 +1780,10 @@ int genpd_dev_pm_attach(struct device *dev) } pd = of_genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); if (IS_ERR(pd)) { dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); - of_node_put(dev->of_node); return -EPROBE_DEFER; } @@ -2017,14 +1801,15 @@ int genpd_dev_pm_attach(struct device *dev) if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", pd->name, ret); - of_node_put(dev->of_node); goto out; } dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - ret = pm_genpd_poweron(pd); + mutex_lock(&pd->lock); + ret = genpd_poweron(pd, 0); + mutex_unlock(&pd->lock); out: return ret ? 
-EPROBE_DEFER : 0; } diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 85e17bacc834..1e937ac5f456 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -77,10 +77,8 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->save_state_latency_ns + - td->stop_latency_ns + - td->start_latency_ns + - td->restore_state_latency_ns; + constraint_ns -= td->suspend_latency_ns + + td->resume_latency_ns; if (constraint_ns == 0) return false; } @@ -162,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) struct gpd_timing_data *td; s64 constraint_ns; - if (!pdd->dev->driver) - continue; - /* * Check if the device is allowed to be off long enough for the * domain to turn off and on (that's how much time it will diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 96a92db83cad..07c3c4a9522d 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -9,6 +9,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/export.h> +#include <linux/suspend.h> #ifdef CONFIG_PM /** @@ -296,11 +297,27 @@ void pm_generic_complete(struct device *dev) if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); +} +/** + * pm_complete_with_resume_check - Complete a device power transition. + * @dev: Device to handle. + * + * Complete a device power transition during a system-wide power transition and + * optionally schedule a runtime resume of the device if the system resume in + * progress has been initiated by the platform firmware and the device had its + * power.direct_complete flag set. + */ +void pm_complete_with_resume_check(struct device *dev) +{ + pm_generic_complete(dev); /* - * Let runtime PM try to suspend devices that haven't been in use before - * going into the system-wide sleep state we're resuming from. + * If the device had been runtime-suspended before the system went into + * the sleep state it is going out of and it has never been resumed till + * now, resume it in case the firmware powered it up. */ - pm_request_idle(dev); + if (dev->power.direct_complete && pm_resume_via_firmware()) + pm_request_resume(dev); } +EXPORT_SYMBOL_GPL(pm_complete_with_resume_check); #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 1710c26ba097..6e7c3ccea24b 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -125,6 +125,7 @@ void device_pm_add(struct device *dev) { pr_debug("PM: Adding info for %s:%s\n", dev->bus ?
dev->bus->name : "No Bus", dev_name(dev)); + device_pm_check_callbacks(dev); mutex_lock(&dpm_list_mtx); if (dev->parent && dev->parent->power.is_prepared) dev_warn(dev, "parent %s should not be sleeping\n", @@ -147,6 +148,7 @@ void device_pm_remove(struct device *dev) mutex_unlock(&dpm_list_mtx); device_wakeup_disable(dev); pm_runtime_remove(dev); + device_pm_check_callbacks(dev); } /** @@ -963,6 +965,9 @@ void dpm_complete(pm_message_t state) } list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); + + /* Allow device probing and trigger re-probing of deferred devices */ + device_unblock_probing(); trace_suspend_resume(TPS("dpm_complete"), state.event, false); } @@ -1569,6 +1574,11 @@ static int device_prepare(struct device *dev, pm_message_t state) dev->power.wakeup_path = device_may_wakeup(dev); + if (dev->power.no_pm_callbacks) { + ret = 1; /* Let device go direct_complete */ + goto unlock; + } + if (dev->pm_domain) { info = "preparing power domain "; callback = dev->pm_domain->ops.prepare; @@ -1591,6 +1601,7 @@ static int device_prepare(struct device *dev, pm_message_t state) if (callback) ret = callback(dev); +unlock: device_unlock(dev); if (ret < 0) { @@ -1624,6 +1635,20 @@ int dpm_prepare(pm_message_t state) trace_suspend_resume(TPS("dpm_prepare"), state.event, true); might_sleep(); + /* + * Give the known devices a chance to complete their probes before we + * disable further probing. This sync point is important at least at + * boot time and during hibernation restore. + */ + wait_for_device_probe(); + /* + * Probing a device during suspend or hibernation is unsafe and would + * make system behavior unpredictable, so prohibit device probing here + * and defer those probes instead. The normal behavior will be restored + * in dpm_complete(). + */ + device_block_probing(); + mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.next); @@ -1719,3 +1744,30 @@ void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) device_pm_unlock(); } EXPORT_SYMBOL_GPL(dpm_for_each_dev); + +static bool pm_ops_is_empty(const struct dev_pm_ops *ops) +{ + if (!ops) + return true; + + return !ops->prepare && + !ops->suspend && + !ops->suspend_late && + !ops->suspend_noirq && + !ops->resume_noirq && + !ops->resume_early && + !ops->resume && + !ops->complete; +} + +void device_pm_check_callbacks(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + dev->power.no_pm_callbacks = + (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && + (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->type || pm_ops_is_empty(dev->type->pm)) && + (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && + (!dev->driver || pm_ops_is_empty(dev->driver->pm)); + spin_unlock_irq(&dev->power.lock); +} diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile new file mode 100644 index 000000000000..19837ef04d8e --- /dev/null +++ b/drivers/base/power/opp/Makefile @@ -0,0 +1,3 @@ +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG +obj-y += core.o cpu.o +obj-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp/core.c index 7ae7cd990fbf..cf351d3dab1c 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp/core.c @@ -11,131 +11,16 @@ * published by the Free Software Foundation.
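device_prepare() returning 1 for callback-less devices feeds the direct_complete machinery, so many more devices can now stay runtime-suspended across system sleep. The flip side is handled by pm_complete_with_resume_check() in the generic_ops.c hunk above: if the resume was initiated by platform firmware, a device that took the direct_complete path gets a runtime resume scheduled in case the firmware powered it up. A minimal sketch of a driver opting in — the acme_* names are hypothetical:

	static int acme_suspend(struct device *dev) { return 0; }
	static int acme_resume(struct device *dev) { return 0; }

	static const struct dev_pm_ops acme_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(acme_suspend, acme_resume)
		.complete = pm_complete_with_resume_check,
	};

Note also the pairing above: dpm_prepare() blocks probing for the whole sleep transition and dpm_complete() unblocks it, at which point the probes deferred in between are retried.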
*/ -#include <linux/cpu.h> -#include <linux/kernel.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/device.h> -#include <linux/list.h> -#include <linux/rculist.h> -#include <linux/rcupdate.h> -#include <linux/pm_opp.h> #include <linux/of.h> #include <linux/export.h> -/* - * Internal data structure organization with the OPP layer library is as - * follows: - * dev_opp_list (root) - * |- device 1 (represents voltage domain 1) - * | |- opp 1 (availability, freq, voltage) - * | |- opp 2 .. - * ... ... - * | `- opp n .. - * |- device 2 (represents the next voltage domain) - * ... - * `- device m (represents mth voltage domain) - * device 1, 2.. are represented by dev_opp structure while each opp - * is represented by the opp structure. - */ - -/** - * struct dev_pm_opp - Generic OPP description structure - * @node: opp list node. The nodes are maintained throughout the lifetime - * of boot. It is expected only an optimal set of OPPs are - * added to the library by the SoC framework. - * RCU usage: opp list is traversed with RCU locks. node - * modification is possible realtime, hence the modifications - * are protected by the dev_opp_list_lock for integrity. - * IMPORTANT: the opp nodes should be maintained in increasing - * order. - * @dynamic: not-created from static DT entries. - * @available: true/false - marks if this OPP as available or not - * @turbo: true if turbo (boost) OPP - * @rate: Frequency in hertz - * @u_volt: Target voltage in microvolts corresponding to this OPP - * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP - * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP - * @u_amp: Maximum current drawn by the device in microamperes - * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's - * frequency from any other OPP's frequency. - * @dev_opp: points back to the device_opp struct this opp belongs to - * @rcu_head: RCU callback head used for deferred freeing - * @np: OPP's device node. - * - * This structure stores the OPP information for a given device. - */ -struct dev_pm_opp { - struct list_head node; - - bool available; - bool dynamic; - bool turbo; - unsigned long rate; - - unsigned long u_volt; - unsigned long u_volt_min; - unsigned long u_volt_max; - unsigned long u_amp; - unsigned long clock_latency_ns; - - struct device_opp *dev_opp; - struct rcu_head rcu_head; - - struct device_node *np; -}; - -/** - * struct device_list_opp - devices managed by 'struct device_opp' - * @node: list node - * @dev: device to which the struct object belongs - * @rcu_head: RCU callback head used for deferred freeing - * - * This is an internal data structure maintaining the list of devices that are - * managed by 'struct device_opp'. - */ -struct device_list_opp { - struct list_head node; - const struct device *dev; - struct rcu_head rcu_head; -}; - -/** - * struct device_opp - Device opp structure - * @node: list node - contains the devices with OPPs that - * have been registered. Nodes once added are not modified in this - * list. - * RCU usage: nodes are not modified in the list of device_opp, - * however addition is possible and is secured by dev_opp_list_lock - * @srcu_head: notifier head to notify the OPP availability changes. - * @rcu_head: RCU callback head used for deferred freeing - * @dev_list: list of devices that share these OPPs - * @opp_list: list of opps - * @np: struct device_node pointer for opp's DT node. 
- * @shared_opp: OPP is shared between multiple devices. - * - * This is an internal data structure maintaining the link to opps attached to - * a device. This structure is not meant to be shared to users as it is - * meant for book keeping and private to OPP library. - * - * Because the opp structures can be used from both rcu and srcu readers, we - * need to wait for the grace period of both of them before freeing any - * resources. And so we have used kfree_rcu() from within call_srcu() handlers. - */ -struct device_opp { - struct list_head node; - - struct srcu_notifier_head srcu_head; - struct rcu_head rcu_head; - struct list_head dev_list; - struct list_head opp_list; - - struct device_node *np; - unsigned long clock_latency_ns_max; - bool shared_opp; - struct dev_pm_opp *suspend_opp; -}; +#include "opp.h" /* * The root of the list of all devices. All device_opp structures branch off @@ -144,7 +29,7 @@ struct device_opp { */ static LIST_HEAD(dev_opp_list); /* Lock to allow exclusive modification to the device and opp lists */ -static DEFINE_MUTEX(dev_opp_list_lock); +DEFINE_MUTEX(dev_opp_list_lock); #define opp_rcu_lockdep_assert() \ do { \ @@ -196,14 +81,18 @@ static struct device_opp *_managed_opp(const struct device_node *np) * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or * -EINVAL based on type of error. * - * Locking: This function must be called under rcu_read_lock(). device_opp - * is a RCU protected pointer. This means that device_opp is valid as long - * as we are under RCU lock. + * Locking: For readers, this function must be called under rcu_read_lock(). + * device_opp is a RCU protected pointer, which means that device_opp is valid + * as long as we are under RCU lock. + * + * For Writers, this function must be called with dev_opp_list_lock held. 
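The reworked comment spells out the reader-side contract in one place; for a consumer it reduces to holding rcu_read_lock() across both the lookup and any dereference of the returned OPP. A minimal sketch, assuming dev and a target frequency supplied by the caller:

	struct dev_pm_opp *opp;
	unsigned long freq = 800000000;	/* 800 MHz, hypothetical */
	unsigned long volt = 0;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();
	/* opp must not be dereferenced past this point */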
*/ -static struct device_opp *_find_device_opp(struct device *dev) +struct device_opp *_find_device_opp(struct device *dev) { struct device_opp *dev_opp; + opp_rcu_lockdep_assert(); + if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); @@ -217,7 +106,7 @@ static struct device_opp *_find_device_opp(struct device *dev) } /** - * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp + * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp * @opp: opp for which voltage has to be returned for * * Return: voltage in micro volt corresponding to the opp, else @@ -239,7 +128,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) opp_rcu_lockdep_assert(); tmp_opp = rcu_dereference(opp); - if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) + if (IS_ERR_OR_NULL(tmp_opp)) pr_err("%s: Invalid parameters\n", __func__); else v = tmp_opp->u_volt; @@ -574,15 +463,17 @@ static void _kfree_list_dev_rcu(struct rcu_head *head) static void _remove_list_dev(struct device_list_opp *list_dev, struct device_opp *dev_opp) { + opp_debug_unregister(list_dev, dev_opp); list_del(&list_dev->node); call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head, _kfree_list_dev_rcu); } -static struct device_list_opp *_add_list_dev(const struct device *dev, - struct device_opp *dev_opp) +struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp) { struct device_list_opp *list_dev; + int ret; list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL); if (!list_dev) @@ -592,6 +483,12 @@ static struct device_list_opp *_add_list_dev(const struct device *dev, list_dev->dev = dev; list_add_rcu(&list_dev->node, &dev_opp->dev_list); + /* Create debugfs entries for the dev_opp */ + ret = opp_debug_register(list_dev, dev_opp); + if (ret) + dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", + __func__, ret); + return list_dev; } @@ -662,6 +559,12 @@ static void _remove_device_opp(struct device_opp *dev_opp) if (!list_empty(&dev_opp->opp_list)) return; + if (dev_opp->supported_hw) + return; + + if (dev_opp->prop_name) + return; + list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp, node); @@ -707,6 +610,7 @@ static void _opp_remove(struct device_opp *dev_opp, */ if (notify) srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); + opp_debug_remove_one(opp); list_del_rcu(&opp->node); call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); @@ -784,6 +688,7 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, { struct dev_pm_opp *opp; struct list_head *head = &dev_opp->opp_list; + int ret; /* * Insert new OPP in order of increasing frequency and discard if @@ -814,11 +719,16 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, new_opp->dev_opp = dev_opp; list_add_rcu(&new_opp->node, head); + ret = opp_debug_create_one(new_opp, dev_opp); + if (ret) + dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", + __func__, ret); + return 0; } /** - * _opp_add_dynamic() - Allocate a dynamic OPP. + * _opp_add_v1() - Allocate a OPP based on v1 bindings. * @dev: device for which we do this operation * @freq: Frequency in Hz for this OPP * @u_volt: Voltage in uVolts for this OPP @@ -828,8 +738,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * The opp is made available by default and it can be controlled using * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. 
* - * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and - * freed by of_free_opp_table. + * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table + * and freed by dev_pm_opp_of_remove_table. * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks @@ -844,8 +754,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, * Duplicate OPPs (both freq and volt are same) and !opp->available * -ENOMEM Memory allocation failure */ -static int _opp_add_dynamic(struct device *dev, unsigned long freq, - long u_volt, bool dynamic) +static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, + bool dynamic) { struct device_opp *dev_opp; struct dev_pm_opp *new_opp; @@ -887,34 +797,49 @@ unlock: } /* TODO: Support multiple regulators */ -static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) +static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, + struct device_opp *dev_opp) { u32 microvolt[3] = {0}; + u32 val; int count, ret; + struct property *prop = NULL; + char name[NAME_MAX]; + + /* Search for "opp-microvolt-<name>" */ + if (dev_opp->prop_name) { + snprintf(name, sizeof(name), "opp-microvolt-%s", + dev_opp->prop_name); + prop = of_find_property(opp->np, name, NULL); + } - /* Missing property isn't a problem, but an invalid entry is */ - if (!of_find_property(opp->np, "opp-microvolt", NULL)) - return 0; + if (!prop) { + /* Search for "opp-microvolt" */ + sprintf(name, "opp-microvolt"); + prop = of_find_property(opp->np, name, NULL); - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + /* Missing property isn't a problem, but an invalid entry is */ + if (!prop) + return 0; + } + + count = of_property_count_u32_elems(opp->np, name); if (count < 0) { - dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", - __func__, count); + dev_err(dev, "%s: Invalid %s property (%d)\n", + __func__, name, count); return count; } /* There can be one or three elements here */ if (count != 1 && count != 3) { - dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", - __func__, count); + dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", + __func__, name, count); return -EINVAL; } - ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt, - count); + ret = of_property_read_u32_array(opp->np, name, microvolt, count); if (ret) { - dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__, - ret); + dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); return -EINVAL; } @@ -922,7 +847,269 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) opp->u_volt_min = microvolt[1]; opp->u_volt_max = microvolt[2]; + /* Search for "opp-microamp-<name>" */ + prop = NULL; + if (dev_opp->prop_name) { + snprintf(name, sizeof(name), "opp-microamp-%s", + dev_opp->prop_name); + prop = of_find_property(opp->np, name, NULL); + } + + if (!prop) { + /* Search for "opp-microamp" */ + sprintf(name, "opp-microamp"); + prop = of_find_property(opp->np, name, NULL); + } + + if (prop && !of_property_read_u32(opp->np, name, &val)) + opp->u_amp = val; + + return 0; +} + +/** + * dev_pm_opp_set_supported_hw() - Set supported platforms + * @dev: Device for which supported-hw has to be set. + * @versions: Array of hierarchy of versions to match. + * @count: Number of elements in the array. 
+ * + * This is required only for the V2 bindings, and it enables a platform to + * specify the hierarchy of versions it supports. OPP layer will then enable + * OPPs, which are available for those versions, based on its 'opp-supported-hw' + * property. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, + unsigned int count) +{ + struct device_opp *dev_opp; + int ret = 0; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = _add_device_opp(dev); + if (!dev_opp) { + ret = -ENOMEM; + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + /* Do we already have a version hierarchy associated with dev_opp? */ + if (dev_opp->supported_hw) { + dev_err(dev, "%s: Already have supported hardware list\n", + __func__); + ret = -EBUSY; + goto err; + } + + dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions), + GFP_KERNEL); + if (!dev_opp->supported_hw) { + ret = -ENOMEM; + goto err; + } + + dev_opp->supported_hw_count = count; + mutex_unlock(&dev_opp_list_lock); return 0; + +err: + _remove_device_opp(dev_opp); +unlock: + mutex_unlock(&dev_opp_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); + +/** + * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw + * @dev: Device for which supported-hw has to be set. + * + * This is required only for the V2 bindings, and is called for a matching + * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure + * will not be freed. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +void dev_pm_opp_put_supported_hw(struct device *dev) +{ + struct device_opp *dev_opp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp)); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + if (!dev_opp->supported_hw) { + dev_err(dev, "%s: Doesn't have supported hardware list\n", + __func__); + goto unlock; + } + + kfree(dev_opp->supported_hw); + dev_opp->supported_hw = NULL; + dev_opp->supported_hw_count = 0; + + /* Try freeing device_opp if this was the last blocking resource */ + _remove_device_opp(dev_opp); + +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); + +/** + * dev_pm_opp_set_prop_name() - Set prop-extn name + * @dev: Device for which the regulator has to be set. + * @name: name to postfix to properties. + * + * This is required only for the V2 bindings, and it enables a platform to + * specify the extn to be used for certain property names. 
The properties to + * which the extension will apply are opp-microvolt and opp-microamp. OPP core + * should postfix the property name with -<name> while looking for them. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +int dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + struct device_opp *dev_opp; + int ret = 0; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = _add_device_opp(dev); + if (!dev_opp) { + ret = -ENOMEM; + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + /* Do we already have a prop-name associated with dev_opp? */ + if (dev_opp->prop_name) { + dev_err(dev, "%s: Already have prop-name %s\n", __func__, + dev_opp->prop_name); + ret = -EBUSY; + goto err; + } + + dev_opp->prop_name = kstrdup(name, GFP_KERNEL); + if (!dev_opp->prop_name) { + ret = -ENOMEM; + goto err; + } + + mutex_unlock(&dev_opp_list_lock); + return 0; + +err: + _remove_device_opp(dev_opp); +unlock: + mutex_unlock(&dev_opp_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); + +/** + * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name + * @dev: Device for which the regulator has to be set. + * + * This is required only for the V2 bindings, and is called for a matching + * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure + * will not be freed. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. 
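Both dev_pm_opp_set_supported_hw() and dev_pm_opp_set_prop_name() must run before the OPP table is populated — note the WARN_ON(!list_empty(&dev_opp->opp_list)) in each. A probe-time sketch under that assumption; the version mask is hypothetical:

	static const u32 acme_versions[] = { BIT(2) };	/* e.g. silicon cut 2 */
	int ret;

	ret = dev_pm_opp_set_supported_hw(dev, acme_versions,
					  ARRAY_SIZE(acme_versions));
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_supported_hw(dev);
	return ret;

_opp_is_supported() below then drops any OPP node whose opp-supported-hw mask shares no bits with the registered versions.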
+ */ +void dev_pm_opp_put_prop_name(struct device *dev) +{ + struct device_opp *dev_opp; + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' first */ + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) { + dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp)); + goto unlock; + } + + /* Make sure there are no concurrent readers while updating dev_opp */ + WARN_ON(!list_empty(&dev_opp->opp_list)); + + if (!dev_opp->prop_name) { + dev_err(dev, "%s: Doesn't have a prop-name\n", __func__); + goto unlock; + } + + kfree(dev_opp->prop_name); + dev_opp->prop_name = NULL; + + /* Try freeing device_opp if this was the last blocking resource */ + _remove_device_opp(dev_opp); + +unlock: + mutex_unlock(&dev_opp_list_lock); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); + +static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp, + struct device_node *np) +{ + unsigned int count = dev_opp->supported_hw_count; + u32 version; + int ret; + + if (!dev_opp->supported_hw) + return true; + + while (count--) { + ret = of_property_read_u32_index(np, "opp-supported-hw", count, + &version); + if (ret) { + dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", + __func__, count, ret); + return false; + } + + /* Both of these are bitwise masks of the versions */ + if (!(version & dev_opp->supported_hw[count])) + return false; + } + + return true; } /** @@ -971,6 +1158,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) goto free_opp; } + /* Check if the OPP supports hardware's hierarchy of versions or not */ + if (!_opp_is_supported(dev, dev_opp, np)) { + dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); + goto free_opp; + } + /* * Rate is defined as an unsigned long in clk API, and so casting * explicitly to its type. Must be fixed once rate is 64 bit @@ -986,25 +1179,24 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) if (!of_property_read_u32(np, "clock-latency-ns", &val)) new_opp->clock_latency_ns = val; - ret = opp_get_microvolt(new_opp, dev); + ret = opp_parse_supplies(new_opp, dev, dev_opp); if (ret) goto free_opp; - if (!of_property_read_u32(new_opp->np, "opp-microamp", &val)) - new_opp->u_amp = val; - ret = _opp_add(dev, new_opp, dev_opp); if (ret) goto free_opp; /* OPP to select on device suspend */ if (of_property_read_bool(np, "opp-suspend")) { - if (dev_opp->suspend_opp) + if (dev_opp->suspend_opp) { dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", __func__, dev_opp->suspend_opp->rate, new_opp->rate); - else + } else { + new_opp->suspend = true; dev_opp->suspend_opp = new_opp; + } } if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) @@ -1056,7 +1248,7 @@ unlock: */ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return _opp_add_dynamic(dev, freq, u_volt, true); + return _opp_add_v1(dev, freq, u_volt, true); } EXPORT_SYMBOL_GPL(dev_pm_opp_add); @@ -1220,7 +1412,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); #ifdef CONFIG_OF /** - * of_free_opp_table() - Free OPP table entries created from static DT entries + * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT + * entries * @dev: device pointer used to lookup device OPPs. * * Free OPPs created using static entries present in DT. 
@@ -1231,7 +1424,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. */ -void of_free_opp_table(struct device *dev) +void dev_pm_opp_of_remove_table(struct device *dev) { struct device_opp *dev_opp; struct dev_pm_opp *opp, *tmp; @@ -1266,92 +1459,39 @@ void of_free_opp_table(struct device *dev) unlock: mutex_unlock(&dev_opp_list_lock); } -EXPORT_SYMBOL_GPL(of_free_opp_table); +EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); -void of_cpumask_free_opp_table(cpumask_var_t cpumask) +/* Returns opp descriptor node for a device, caller must do of_node_put() */ +struct device_node *_of_get_opp_desc_node(struct device *dev) { - struct device *cpu_dev; - int cpu; - - WARN_ON(cpumask_empty(cpumask)); - - for_each_cpu(cpu, cpumask) { - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - pr_err("%s: failed to get cpu%d device\n", __func__, - cpu); - continue; - } - - of_free_opp_table(cpu_dev); - } -} -EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); - -/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ -static struct device_node * -_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) -{ - struct device_node *opp_np; - - opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value)); - if (!opp_np) { - dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n", - __func__, prop->name); - return ERR_PTR(-EINVAL); - } - - return opp_np; -} - -/* Returns opp descriptor node for a device. Caller must do of_node_put() */ -static struct device_node *_of_get_opp_desc_node(struct device *dev) -{ - const struct property *prop; - - prop = of_find_property(dev->of_node, "operating-points-v2", NULL); - if (!prop) - return ERR_PTR(-ENODEV); - if (!prop->value) - return ERR_PTR(-ENODATA); - /* * TODO: Support for multiple OPP tables. * * There should be only ONE phandle present in "operating-points-v2" * property. 
*/ - if (prop->length != sizeof(__be32)) { - dev_err(dev, "%s: Invalid opp desc phandle\n", __func__); - return ERR_PTR(-EINVAL); - } - return _of_get_opp_desc_node_from_prop(dev, prop); + return of_parse_phandle(dev->of_node, "operating-points-v2", 0); } /* Initializes OPP tables based on new bindings */ -static int _of_init_opp_table_v2(struct device *dev, - const struct property *prop) +static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) { - struct device_node *opp_np, *np; + struct device_node *np; struct device_opp *dev_opp; int ret = 0, count = 0; - if (!prop->value) - return -ENODATA; - - /* Get opp node */ - opp_np = _of_get_opp_desc_node_from_prop(dev, prop); - if (IS_ERR(opp_np)) - return PTR_ERR(opp_np); + mutex_lock(&dev_opp_list_lock); dev_opp = _managed_opp(opp_np); if (dev_opp) { /* OPPs are already managed */ if (!_add_list_dev(dev, dev_opp)) ret = -ENOMEM; - goto put_opp_np; + mutex_unlock(&dev_opp_list_lock); + return ret; } + mutex_unlock(&dev_opp_list_lock); /* We have opp-list node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_np, np) { @@ -1366,33 +1506,33 @@ static int _of_init_opp_table_v2(struct device *dev, } /* There should be one of more OPP defined */ - if (WARN_ON(!count)) { - ret = -ENOENT; - goto put_opp_np; - } + if (WARN_ON(!count)) + return -ENOENT; + + mutex_lock(&dev_opp_list_lock); dev_opp = _find_device_opp(dev); if (WARN_ON(IS_ERR(dev_opp))) { ret = PTR_ERR(dev_opp); + mutex_unlock(&dev_opp_list_lock); goto free_table; } dev_opp->np = opp_np; dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared"); - of_node_put(opp_np); + mutex_unlock(&dev_opp_list_lock); + return 0; free_table: - of_free_opp_table(dev); -put_opp_np: - of_node_put(opp_np); + dev_pm_opp_of_remove_table(dev); return ret; } /* Initializes OPP tables based on old-deprecated bindings */ -static int _of_init_opp_table_v1(struct device *dev) +static int _of_add_opp_table_v1(struct device *dev) { const struct property *prop; const __be32 *val; @@ -1419,7 +1559,7 @@ static int _of_init_opp_table_v1(struct device *dev) unsigned long freq = be32_to_cpup(val++) * 1000; unsigned long volt = be32_to_cpup(val++); - if (_opp_add_dynamic(dev, freq, volt, false)) + if (_opp_add_v1(dev, freq, volt, false)) dev_warn(dev, "%s: Failed to add OPP %ld\n", __func__, freq); nr -= 2; @@ -1429,7 +1569,7 @@ static int _of_init_opp_table_v1(struct device *dev) } /** - * of_init_opp_table() - Initialize opp table from device tree + * dev_pm_opp_of_add_table() - Initialize opp table from device tree * @dev: device pointer used to lookup device OPPs. * * Register the initial OPP table with the OPP library for given device. @@ -1451,153 +1591,28 @@ static int _of_init_opp_table_v1(struct device *dev) * -ENODATA when empty 'operating-points' property is found * -EINVAL when invalid entries are found in opp-v2 table */ -int of_init_opp_table(struct device *dev) +int dev_pm_opp_of_add_table(struct device *dev) { - const struct property *prop; + struct device_node *opp_np; + int ret; /* * OPPs have two version of bindings now. The older one is deprecated, * try for the new binding first. */ - prop = of_find_property(dev->of_node, "operating-points-v2", NULL); - if (!prop) { + opp_np = _of_get_opp_desc_node(dev); + if (!opp_np) { /* * Try old-deprecated bindings for backward compatibility with * older dtbs. 
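Collapsing _of_get_opp_desc_node() to a single of_parse_phandle() call works because of_parse_phandle() already validates the property and returns a referenced node, so every caller just pairs it with of_node_put(). On the consumer side the renamed entry point hides the v2/v1 fallback entirely — a sketch, assuming a cpu_dev obtained via get_cpu_device():

	ret = dev_pm_opp_of_add_table(cpu_dev);	/* tries v2, falls back to v1 */
	if (ret) {
		dev_err(cpu_dev, "no OPP table: %d\n", ret);
		return ret;
	}

	/* ... use the OPPs ... */

	dev_pm_opp_of_remove_table(cpu_dev);	/* on teardown */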
*/ - return _of_init_opp_table_v1(dev); + return _of_add_opp_table_v1(dev); } - return _of_init_opp_table_v2(dev, prop); -} -EXPORT_SYMBOL_GPL(of_init_opp_table); - -int of_cpumask_init_opp_table(cpumask_var_t cpumask) -{ - struct device *cpu_dev; - int cpu, ret = 0; - - WARN_ON(cpumask_empty(cpumask)); - - for_each_cpu(cpu, cpumask) { - cpu_dev = get_cpu_device(cpu); - if (!cpu_dev) { - pr_err("%s: failed to get cpu%d device\n", __func__, - cpu); - continue; - } - - ret = of_init_opp_table(cpu_dev); - if (ret) { - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", - __func__, cpu, ret); - - /* Free all other OPPs */ - of_cpumask_free_opp_table(cpumask); - break; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table); - -/* Required only for V1 bindings, as v2 can manage it from DT itself */ -int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) -{ - struct device_list_opp *list_dev; - struct device_opp *dev_opp; - struct device *dev; - int cpu, ret = 0; - - rcu_read_lock(); - - dev_opp = _find_device_opp(cpu_dev); - if (IS_ERR(dev_opp)) { - ret = -EINVAL; - goto out_rcu_read_unlock; - } - - for_each_cpu(cpu, cpumask) { - if (cpu == cpu_dev->id) - continue; - - dev = get_cpu_device(cpu); - if (!dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", - __func__, cpu); - continue; - } - - list_dev = _add_list_dev(dev, dev_opp); - if (!list_dev) { - dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", - __func__, cpu); - continue; - } - } -out_rcu_read_unlock: - rcu_read_unlock(); - - return 0; -} -EXPORT_SYMBOL_GPL(set_cpus_sharing_opps); - -/* - * Works only for OPP v2 bindings. - * - * cpumask should be already set to mask of cpu_dev->id. - * Returns -ENOENT if operating-points-v2 bindings aren't supported. - */ -int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) -{ - struct device_node *np, *tmp_np; - struct device *tcpu_dev; - int cpu, ret = 0; - - /* Get OPP descriptor node */ - np = _of_get_opp_desc_node(cpu_dev); - if (IS_ERR(np)) { - dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__, - PTR_ERR(np)); - return -ENOENT; - } - - /* OPPs are shared ? */ - if (!of_property_read_bool(np, "opp-shared")) - goto put_cpu_node; - - for_each_possible_cpu(cpu) { - if (cpu == cpu_dev->id) - continue; - - tcpu_dev = get_cpu_device(cpu); - if (!tcpu_dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", - __func__, cpu); - ret = -ENODEV; - goto put_cpu_node; - } - - /* Get OPP descriptor node */ - tmp_np = _of_get_opp_desc_node(tcpu_dev); - if (IS_ERR(tmp_np)) { - dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n", - __func__, PTR_ERR(tmp_np)); - ret = PTR_ERR(tmp_np); - goto put_cpu_node; - } - - /* CPUs are sharing opp node */ - if (np == tmp_np) - cpumask_set_cpu(cpu, cpumask); - - of_node_put(tmp_np); - } + ret = _of_add_opp_table_v2(dev, opp_np); + of_node_put(opp_np); -put_cpu_node: - of_node_put(np); return ret; } -EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps); +EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); #endif diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c new file mode 100644 index 000000000000..9f0c15570f64 --- /dev/null +++ b/drivers/base/power/opp/cpu.c @@ -0,0 +1,271 @@ +/* + * Generic OPP helper interface for CPU device + * + * Copyright (C) 2009-2014 Texas Instruments Incorporated. 
+ * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/of.h> +#include <linux/slab.h> + +#include "opp.h" + +#ifdef CONFIG_CPU_FREQ + +/** + * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: Cpufreq table returned to the caller + * + * Generate a cpufreq table for a provided device - this assumes that the + * OPP list is already initialized and ready for usage. + * + * This function allocates required memory for the cpufreq table. It is + * expected that the caller does the required maintenance, such as freeing + * the table once it is no longer needed. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory is available for the operation (table is not populated), and 0 + * on success (table is populated). + * + * WARNING: Callers must refresh their copy of the table if any of the OPP + * modification functions have been invoked in the interim. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Since we just use the regular accessor functions to access the internal data + * structures, we use RCU read lock inside this function. As a result, users of + * this function do not need to take explicit locks when invoking it. + */ +int dev_pm_opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + struct dev_pm_opp *opp; + struct cpufreq_frequency_table *freq_table = NULL; + int i, max_opps, ret = 0; + unsigned long rate; + + rcu_read_lock(); + + max_opps = dev_pm_opp_get_opp_count(dev); + if (max_opps <= 0) { + ret = max_opps ? max_opps : -ENODATA; + goto out; + } + + freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); + if (!freq_table) { + ret = -ENOMEM; + goto out; + } + + for (i = 0, rate = 0; i < max_opps; i++, rate++) { + /* find next rate */ + opp = dev_pm_opp_find_freq_ceil(dev, &rate); + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + goto out; + } + freq_table[i].driver_data = i; + freq_table[i].frequency = rate / 1000; + + /* Is Boost/turbo opp ?
*/ + if (dev_pm_opp_is_turbo(opp)) + freq_table[i].flags = CPUFREQ_BOOST_FREQ; + } + + freq_table[i].driver_data = i; + freq_table[i].frequency = CPUFREQ_TABLE_END; + + *table = &freq_table[0]; + +out: + rcu_read_unlock(); + if (ret) + kfree(freq_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); + +/** + * dev_pm_opp_free_cpufreq_table() - free the cpufreq table + * @dev: device for which we do this operation + * @table: table to free + * + * Free up the table allocated by dev_pm_opp_init_cpufreq_table + */ +void dev_pm_opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + if (!table) + return; + + kfree(*table); + *table = NULL; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); +#endif /* CONFIG_CPU_FREQ */ + +/* Required only for V1 bindings, as v2 can manage it from DT itself */ +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +{ + struct device_list_opp *list_dev; + struct device_opp *dev_opp; + struct device *dev; + int cpu, ret = 0; + + mutex_lock(&dev_opp_list_lock); + + dev_opp = _find_device_opp(cpu_dev); + if (IS_ERR(dev_opp)) { + ret = -EINVAL; + goto unlock; + } + + for_each_cpu(cpu, cpumask) { + if (cpu == cpu_dev->id) + continue; + + dev = get_cpu_device(cpu); + if (!dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + continue; + } + + list_dev = _add_list_dev(dev, dev_opp); + if (!list_dev) { + dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", + __func__, cpu); + continue; + } + } +unlock: + mutex_unlock(&dev_opp_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); + +#ifdef CONFIG_OF +void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + dev_pm_opp_of_remove_table(cpu_dev); + } +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); + +int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu, ret = 0; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + ret = dev_pm_opp_of_add_table(cpu_dev); + if (ret) { + pr_err("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); + + /* Free all other OPPs */ + dev_pm_opp_of_cpumask_remove_table(cpumask); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); + +/* + * Works only for OPP v2 bindings. + * + * Returns -ENOENT if operating-points-v2 bindings aren't supported. + */ +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +{ + struct device_node *np, *tmp_np; + struct device *tcpu_dev; + int cpu, ret = 0; + + /* Get OPP descriptor node */ + np = _of_get_opp_desc_node(cpu_dev); + if (!np) { + dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); + return -ENOENT; + } + + cpumask_set_cpu(cpu_dev->id, cpumask); + + /* OPPs are shared ? 
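
Taken together, a driver would combine the v2 discovery below with the v1 fallback above roughly like this (a sketch: the mask is assumed pre-zeroed, the table for cpu_dev already added, and treating all CPUs as one domain on v1 is an assumption for illustration only):

	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpumask);
	if (ret == -ENOENT) {
		/* v1 bindings cannot express sharing in DT, so the
		 * platform code has to declare it explicitly. */
		cpumask_setall(cpumask);
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
	}
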
*/ + if (!of_property_read_bool(np, "opp-shared")) + goto put_cpu_node; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + ret = -ENODEV; + goto put_cpu_node; + } + + /* Get OPP descriptor node */ + tmp_np = _of_get_opp_desc_node(tcpu_dev); + if (!tmp_np) { + dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", + __func__); + ret = -ENOENT; + goto put_cpu_node; + } + + /* CPUs are sharing opp node */ + if (np == tmp_np) + cpumask_set_cpu(cpu, cpumask); + + of_node_put(tmp_np); + } + +put_cpu_node: + of_node_put(np); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); +#endif diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c new file mode 100644 index 000000000000..ddfe4773e922 --- /dev/null +++ b/drivers/base/power/opp/debugfs.c @@ -0,0 +1,219 @@ +/* + * Generic OPP debugfs interface + * + * Copyright (C) 2015-2016 Viresh Kumar <viresh.kumar@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/limits.h> + +#include "opp.h" + +static struct dentry *rootdir; + +static void opp_set_dev_name(const struct device *dev, char *name) +{ + if (dev->parent) + snprintf(name, NAME_MAX, "%s-%s", dev_name(dev->parent), + dev_name(dev)); + else + snprintf(name, NAME_MAX, "%s", dev_name(dev)); +} + +void opp_debug_remove_one(struct dev_pm_opp *opp) +{ + debugfs_remove_recursive(opp->dentry); +} + +int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp) +{ + struct dentry *pdentry = dev_opp->dentry; + struct dentry *d; + char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */ + + /* Rate is unique to each OPP, use it to give opp-name */ + snprintf(name, sizeof(name), "opp:%lu", opp->rate); + + /* Create per-opp directory */ + d = debugfs_create_dir(name, pdentry); + if (!d) + return -ENOMEM; + + if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available)) + return -ENOMEM; + + if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic)) + return -ENOMEM; + + if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo)) + return -ENOMEM; + + if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend)) + return -ENOMEM; + + if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max)) + return -ENOMEM; + + if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp)) + return -ENOMEM; + + if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, + &opp->clock_latency_ns)) + return -ENOMEM; + + opp->dentry = d; + return 0; +} + +static int device_opp_debug_create_dir(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + const struct device *dev = list_dev->dev; + struct dentry *d; + + opp_set_dev_name(dev, dev_opp->dentry_name); + + /* Create device specific directory */ + d = debugfs_create_dir(dev_opp->dentry_name, rootdir); + if (!d) { + dev_err(dev, "%s: Failed to 
create debugfs dir\n", __func__); + return -ENOMEM; + } + + list_dev->dentry = d; + dev_opp->dentry = d; + + return 0; +} + +static int device_opp_debug_create_link(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + const struct device *dev = list_dev->dev; + char name[NAME_MAX]; + struct dentry *d; + + opp_set_dev_name(list_dev->dev, name); + + /* Create device specific directory link */ + d = debugfs_create_symlink(name, rootdir, dev_opp->dentry_name); + if (!d) { + dev_err(dev, "%s: Failed to create link\n", __func__); + return -ENOMEM; + } + + list_dev->dentry = d; + + return 0; +} + +/** + * opp_debug_register - add a device opp node to the debugfs 'opp' directory + * @list_dev: list-dev pointer for device + * @dev_opp: the device-opp being added + * + * Dynamically adds device specific directory in debugfs 'opp' directory. If the + * device-opp is shared with other devices, then links will be created for all + * devices except the first. + * + * Return: 0 on success, otherwise negative error. + */ +int opp_debug_register(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + if (!rootdir) { + pr_debug("%s: Uninitialized rootdir\n", __func__); + return -EINVAL; + } + + if (dev_opp->dentry) + return device_opp_debug_create_link(list_dev, dev_opp); + + return device_opp_debug_create_dir(list_dev, dev_opp); +} + +static void opp_migrate_dentry(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + struct device_list_opp *new_dev; + const struct device *dev; + struct dentry *dentry; + + /* Look for next list-dev */ + list_for_each_entry(new_dev, &dev_opp->dev_list, node) + if (new_dev != list_dev) + break; + + /* new_dev is guaranteed to be valid here */ + dev = new_dev->dev; + debugfs_remove_recursive(new_dev->dentry); + + opp_set_dev_name(dev, dev_opp->dentry_name); + + dentry = debugfs_rename(rootdir, list_dev->dentry, rootdir, + dev_opp->dentry_name); + if (!dentry) { + dev_err(dev, "%s: Failed to rename link from: %s to %s\n", + __func__, dev_name(list_dev->dev), dev_name(dev)); + return; + } + + new_dev->dentry = dentry; + dev_opp->dentry = dentry; +} + +/** + * opp_debug_unregister - remove a device opp node from debugfs opp directory + * @list_dev: list-dev pointer for device + * @dev_opp: the device-opp being removed + * + * Dynamically removes device specific directory from debugfs 'opp' directory. + */ +void opp_debug_unregister(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ + if (list_dev->dentry == dev_opp->dentry) { + /* Move the real dentry object under another device */ + if (!list_is_singular(&dev_opp->dev_list)) { + opp_migrate_dentry(list_dev, dev_opp); + goto out; + } + dev_opp->dentry = NULL; + } + + debugfs_remove_recursive(list_dev->dentry); + +out: + list_dev->dentry = NULL; +} + +static int __init opp_debug_init(void) +{ + /* Create /sys/kernel/debug/opp directory */ + rootdir = debugfs_create_dir("opp", NULL); + if (!rootdir) { + pr_err("%s: Failed to create root directory\n", __func__); + return -ENOMEM; + } + + return 0; +} +core_initcall(opp_debug_init); diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h new file mode 100644 index 000000000000..690638ef36ee --- /dev/null +++ b/drivers/base/power/opp/opp.h @@ -0,0 +1,197 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. 
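
The layout this produces under debugfs, reconstructed from the code above (device names and rates are examples):

	/sys/kernel/debug/opp/
	|- cpu0/                        <- real directory (dentry_name)
	|  |- opp:960000000/            <- one directory per OPP, keyed by rate
	|  |  |- available, dynamic, turbo, suspend
	|  |  `- rate_hz, u_volt_target, u_volt_min, u_volt_max, u_amp,
	|  |       clock_latency_ns
	|  `- opp:1200000000/ ...
	`- cpu1 -> cpu0                 <- symlink per further sharing device
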
+ * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __DRIVER_OPP_H__ +#define __DRIVER_OPP_H__ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/limits.h> +#include <linux/pm_opp.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> + +/* Lock to allow exclusive modification to the device and opp lists */ +extern struct mutex dev_opp_list_lock; + +/* + * Internal data structure organization with the OPP layer library is as + * follows: + * dev_opp_list (root) + * |- device 1 (represents voltage domain 1) + * | |- opp 1 (availability, freq, voltage) + * | |- opp 2 .. + * ... ... + * | `- opp n .. + * |- device 2 (represents the next voltage domain) + * ... + * `- device m (represents mth voltage domain) + * device 1, 2.. are represented by dev_opp structure while each opp + * is represented by the opp structure. + */ + +/** + * struct dev_pm_opp - Generic OPP description structure + * @node: opp list node. The nodes are maintained throughout the lifetime + * of boot. It is expected only an optimal set of OPPs are + * added to the library by the SoC framework. + * RCU usage: opp list is traversed with RCU locks. node + * modification is possible realtime, hence the modifications + * are protected by the dev_opp_list_lock for integrity. + * IMPORTANT: the opp nodes should be maintained in increasing + * order. + * @available: true/false - marks if this OPP as available or not + * @dynamic: not-created from static DT entries. + * @turbo: true if turbo (boost) OPP + * @suspend: true if suspend OPP + * @rate: Frequency in hertz + * @u_volt: Target voltage in microvolts corresponding to this OPP + * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP + * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP + * @u_amp: Maximum current drawn by the device in microamperes + * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's + * frequency from any other OPP's frequency. + * @dev_opp: points back to the device_opp struct this opp belongs to + * @rcu_head: RCU callback head used for deferred freeing + * @np: OPP's device node. + * @dentry: debugfs dentry pointer (per opp) + * + * This structure stores the OPP information for a given device. + */ +struct dev_pm_opp { + struct list_head node; + + bool available; + bool dynamic; + bool turbo; + bool suspend; + unsigned long rate; + + unsigned long u_volt; + unsigned long u_volt_min; + unsigned long u_volt_max; + unsigned long u_amp; + unsigned long clock_latency_ns; + + struct device_opp *dev_opp; + struct rcu_head rcu_head; + + struct device_node *np; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif +}; + +/** + * struct device_list_opp - devices managed by 'struct device_opp' + * @node: list node + * @dev: device to which the struct object belongs + * @rcu_head: RCU callback head used for deferred freeing + * @dentry: debugfs dentry pointer (per device) + * + * This is an internal data structure maintaining the list of devices that are + * managed by 'struct device_opp'. 
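
A reader-side sketch of how these structures are traversed under the locking rules documented above (illustration only, not part of the patch):

	struct device_opp *dev_opp;
	struct dev_pm_opp *opp;
	int count = 0;

	/* opp_list is only walked under rcu_read_lock(); writers
	 * serialize on dev_opp_list_lock. */
	rcu_read_lock();
	dev_opp = _find_device_opp(dev);
	if (!IS_ERR(dev_opp))
		list_for_each_entry_rcu(opp, &dev_opp->opp_list, node)
			if (opp->available)
				count++;
	rcu_read_unlock();
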
+ */ +struct device_list_opp { + struct list_head node; + const struct device *dev; + struct rcu_head rcu_head; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif +}; + +/** + * struct device_opp - Device opp structure + * @node: list node - contains the devices with OPPs that + * have been registered. Nodes once added are not modified in this + * list. + * RCU usage: nodes are not modified in the list of device_opp, + * however addition is possible and is secured by dev_opp_list_lock + * @srcu_head: notifier head to notify the OPP availability changes. + * @rcu_head: RCU callback head used for deferred freeing + * @dev_list: list of devices that share these OPPs + * @opp_list: list of opps + * @np: struct device_node pointer for opp's DT node. + * @clock_latency_ns_max: Max clock latency in nanoseconds. + * @shared_opp: OPP is shared between multiple devices. + * @suspend_opp: Pointer to OPP to be used during device suspend. + * @supported_hw: Array of version number to support. + * @supported_hw_count: Number of elements in supported_hw array. + * @prop_name: A name to postfix to many DT properties, while parsing them. + * @dentry: debugfs dentry pointer of the real device directory (not links). + * @dentry_name: Name of the real dentry. + * + * This is an internal data structure maintaining the link to opps attached to + * a device. This structure is not meant to be shared to users as it is + * meant for book keeping and private to OPP library. + * + * Because the opp structures can be used from both rcu and srcu readers, we + * need to wait for the grace period of both of them before freeing any + * resources. And so we have used kfree_rcu() from within call_srcu() handlers. + */ +struct device_opp { + struct list_head node; + + struct srcu_notifier_head srcu_head; + struct rcu_head rcu_head; + struct list_head dev_list; + struct list_head opp_list; + + struct device_node *np; + unsigned long clock_latency_ns_max; + bool shared_opp; + struct dev_pm_opp *suspend_opp; + + unsigned int *supported_hw; + unsigned int supported_hw_count; + const char *prop_name; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; + char dentry_name[NAME_MAX]; +#endif +}; + +/* Routines internal to opp core */ +struct device_opp *_find_device_opp(struct device *dev); +struct device_list_opp *_add_list_dev(const struct device *dev, + struct device_opp *dev_opp); +struct device_node *_of_get_opp_desc_node(struct device *dev); + +#ifdef CONFIG_DEBUG_FS +void opp_debug_remove_one(struct dev_pm_opp *opp); +int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp); +int opp_debug_register(struct device_list_opp *list_dev, + struct device_opp *dev_opp); +void opp_debug_unregister(struct device_list_opp *list_dev, + struct device_opp *dev_opp); +#else +static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} + +static inline int opp_debug_create_one(struct dev_pm_opp *opp, + struct device_opp *dev_opp) +{ return 0; } +static inline int opp_debug_register(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ return 0; } + +static inline void opp_debug_unregister(struct device_list_opp *list_dev, + struct device_opp *dev_opp) +{ } +#endif /* DEBUG_FS */ + +#endif /* __DRIVER_OPP_H__ */ diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 998fa6b23084..50e30e7b059d 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -18,6 +18,7 @@ static inline void pm_runtime_early_init(struct device *dev) } extern void pm_runtime_init(struct 
device *dev); +extern void pm_runtime_reinit(struct device *dev); extern void pm_runtime_remove(struct device *dev); struct wake_irq { @@ -84,6 +85,7 @@ static inline void pm_runtime_early_init(struct device *dev) } static inline void pm_runtime_init(struct device *dev) {} +static inline void pm_runtime_reinit(struct device *dev) {} static inline void pm_runtime_remove(struct device *dev) {} static inline int dpm_sysfs_add(struct device *dev) { return 0; } @@ -123,6 +125,7 @@ extern void device_pm_remove(struct device *); extern void device_pm_move_before(struct device *, struct device *); extern void device_pm_move_after(struct device *, struct device *); extern void device_pm_move_last(struct device *); +extern void device_pm_check_callbacks(struct device *dev); #else /* !CONFIG_PM_SLEEP */ @@ -141,6 +144,8 @@ static inline void device_pm_move_after(struct device *deva, struct device *devb) {} static inline void device_pm_move_last(struct device *dev) {} +static inline void device_pm_check_callbacks(struct device *dev) {} + #endif /* !CONFIG_PM_SLEEP */ static inline void device_pm_init(struct device *dev) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index e1a10a03df8e..4c7055009bd6 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -966,6 +966,30 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** + * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter. + * @dev: Device to handle. + * + * Return -EINVAL if runtime PM is disabled for the device. + * + * If that's not the case and if the device's runtime PM status is RPM_ACTIVE + * and the runtime PM usage counter is nonzero, increment the counter and + * return 1. Otherwise return 0 without changing the counter. + */ +int pm_runtime_get_if_in_use(struct device *dev) +{ + unsigned long flags; + int retval; + + spin_lock_irqsave(&dev->power.lock, flags); + retval = dev->power.disable_depth > 0 ? -EINVAL : + dev->power.runtime_status == RPM_ACTIVE + && atomic_inc_not_zero(&dev->power.usage_count); + spin_unlock_irqrestore(&dev->power.lock, flags); + return retval; +} +EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); + +/** * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. * @status: New runtime PM status of the device. @@ -1390,18 +1414,32 @@ void pm_runtime_init(struct device *dev) } /** + * pm_runtime_reinit - Re-initialize runtime PM fields in given device object. + * @dev: Device object to re-initialize. + */ +void pm_runtime_reinit(struct device *dev) +{ + if (!pm_runtime_enabled(dev)) { + if (dev->power.runtime_status == RPM_ACTIVE) + pm_runtime_set_suspended(dev); + if (dev->power.irq_safe) { + spin_lock_irq(&dev->power.lock); + dev->power.irq_safe = 0; + spin_unlock_irq(&dev->power.lock); + if (dev->parent) + pm_runtime_put(dev->parent); + } + } +} + +/** * pm_runtime_remove - Prepare for removing a device from device hierarchy. * @dev: Device object being removed from device hierarchy. */ void pm_runtime_remove(struct device *dev) { __pm_runtime_disable(dev, false); - - /* Change the status back to 'suspended' to match the initial status. 
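
A typical use of the new conditional get (a sketch with a hypothetical caller): touch the hardware only when the device is already active and in use, without triggering a resume.

	static void demo_flush(struct device *dev)
	{
		if (pm_runtime_get_if_in_use(dev) <= 0)
			return;	/* suspended, unused, or runtime PM disabled */

		/* ... safe to poke registers here ... */

		pm_runtime_put(dev);
	}
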
*/ - if (dev->power.runtime_status == RPM_ACTIVE) - pm_runtime_set_suspended(dev); - if (dev->power.irq_safe && dev->parent) - pm_runtime_put(dev->parent); + pm_runtime_reinit(dev); } /** diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index eb6e67451dec..0d77cd6fd8d1 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq) struct wake_irq *wirq; int err; + if (irq < 0) + return -EINVAL; + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); if (!wirq) return -ENOMEM; @@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) struct wake_irq *wirq; int err; + if (irq < 0) + return -EINVAL; + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); if (!wirq) return -ENOMEM; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 51f15bc15774..a1e0b9ab847a 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -25,6 +25,9 @@ */ bool events_check_enabled __read_mostly; +/* First wakeup IRQ seen by the kernel in the last cycle. */ +unsigned int pm_wakeup_irq __read_mostly; + /* If set and the system is suspending, terminate the suspend. */ static bool pm_abort_suspend __read_mostly; @@ -91,7 +94,7 @@ struct wakeup_source *wakeup_source_create(const char *name) if (!ws) return NULL; - wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL); + wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL); return ws; } EXPORT_SYMBOL_GPL(wakeup_source_create); @@ -154,7 +157,7 @@ void wakeup_source_destroy(struct wakeup_source *ws) wakeup_source_drop(ws); wakeup_source_record(ws); - kfree(ws->name); + kfree_const(ws->name); kfree(ws); } EXPORT_SYMBOL_GPL(wakeup_source_destroy); @@ -868,6 +871,15 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup); void pm_wakeup_clear(void) { pm_abort_suspend = false; + pm_wakeup_irq = 0; +} + +void pm_system_irq_wakeup(unsigned int irq_number) +{ + if (pm_wakeup_irq == 0) { + pm_wakeup_irq = irq_number; + pm_system_wakeup(); + } } /** diff --git a/drivers/base/property.c b/drivers/base/property.c index 2d75366c61e0..c359351d50f1 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -19,32 +19,14 @@ #include <linux/etherdevice.h> #include <linux/phy.h> -/** - * device_add_property_set - Add a collection of properties to a device object. - * @dev: Device to add properties to. - * @pset: Collection of properties to add. - * - * Associate a collection of device properties represented by @pset with @dev - * as its secondary firmware node. - */ -void device_add_property_set(struct device *dev, struct property_set *pset) -{ - if (!pset) - return; - - pset->fwnode.type = FWNODE_PDATA; - set_secondary_fwnode(dev, &pset->fwnode); -} -EXPORT_SYMBOL_GPL(device_add_property_set); - -static inline bool is_pset(struct fwnode_handle *fwnode) +static inline bool is_pset_node(struct fwnode_handle *fwnode) { return fwnode && fwnode->type == FWNODE_PDATA; } -static inline struct property_set *to_pset(struct fwnode_handle *fwnode) +static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) { - return is_pset(fwnode) ? + return is_pset_node(fwnode) ? 
container_of(fwnode, struct property_set, fwnode) : NULL; } @@ -63,45 +45,135 @@ static struct property_entry *pset_prop_get(struct property_set *pset, return NULL; } -static int pset_prop_read_array(struct property_set *pset, const char *name, - enum dev_prop_type type, void *val, size_t nval) +static void *pset_prop_find(struct property_set *pset, const char *propname, + size_t length) { struct property_entry *prop; - unsigned int item_size; + void *pointer; - prop = pset_prop_get(pset, name); + prop = pset_prop_get(pset, propname); if (!prop) - return -ENODATA; + return ERR_PTR(-EINVAL); + if (prop->is_array) + pointer = prop->pointer.raw_data; + else + pointer = &prop->value.raw_data; + if (!pointer) + return ERR_PTR(-ENODATA); + if (length > prop->length) + return ERR_PTR(-EOVERFLOW); + return pointer; +} + +static int pset_prop_read_u8_array(struct property_set *pset, + const char *propname, + u8 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_read_u16_array(struct property_set *pset, + const char *propname, + u16 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_read_u32_array(struct property_set *pset, + const char *propname, + u32 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_read_u64_array(struct property_set *pset, + const char *propname, + u64 *values, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*values); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(values, pointer, length); + return 0; +} + +static int pset_prop_count_elems_of_size(struct property_set *pset, + const char *propname, size_t length) +{ + struct property_entry *prop; + + prop = pset_prop_get(pset, propname); + if (!prop) + return -EINVAL; + + return prop->length / length; +} + +static int pset_prop_read_string_array(struct property_set *pset, + const char *propname, + const char **strings, size_t nval) +{ + void *pointer; + size_t length = nval * sizeof(*strings); + + pointer = pset_prop_find(pset, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); - if (prop->type != type) - return -EPROTO; - - if (!val) - return prop->nval; - - if (prop->nval < nval) - return -EOVERFLOW; - - switch (type) { - case DEV_PROP_U8: - item_size = sizeof(u8); - break; - case DEV_PROP_U16: - item_size = sizeof(u16); - break; - case DEV_PROP_U32: - item_size = sizeof(u32); - break; - case DEV_PROP_U64: - item_size = sizeof(u64); - break; - case DEV_PROP_STRING: - item_size = sizeof(const char *); - break; - default: + memcpy(strings, pointer, length); + return 0; +} + +static int pset_prop_read_string(struct property_set *pset, + const char *propname, const char **strings) +{ + struct property_entry *prop; + const char **pointer; + + prop = pset_prop_get(pset, propname); + if (!prop) return -EINVAL; + if (!prop->is_string) + return -EILSEQ; + if (prop->is_array) { + pointer = prop->pointer.str; 
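
The four fixed-width readers above differ only in their element type; a generator macro along these lines (a sketch, not what the patch does) would collapse the repetition:

	#define PSET_PROP_READ_FN(type)					     \
	static int pset_prop_read_##type##_array(struct property_set *pset, \
						 const char *propname,	     \
						 type *values, size_t nval)  \
	{								     \
		size_t length = nval * sizeof(*values);			     \
		void *pointer = pset_prop_find(pset, propname, length);	     \
									     \
		if (IS_ERR(pointer))					     \
			return PTR_ERR(pointer);			     \
		memcpy(values, pointer, length);			     \
		return 0;						     \
	}
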
+ if (!pointer) + return -ENODATA; + } else { + pointer = &prop->value.str; + if (*pointer && strnlen(*pointer, prop->length) >= prop->length) + return -EILSEQ; } - memcpy(val, prop->value.raw_data, nval * item_size); + + *strings = *pointer; return 0; } @@ -124,6 +196,18 @@ bool device_property_present(struct device *dev, const char *propname) } EXPORT_SYMBOL_GPL(device_property_present); +static bool __fwnode_property_present(struct fwnode_handle *fwnode, + const char *propname) +{ + if (is_of_node(fwnode)) + return of_property_read_bool(to_of_node(fwnode), propname); + else if (is_acpi_node(fwnode)) + return !acpi_node_prop_get(fwnode, propname, NULL); + else if (is_pset_node(fwnode)) + return !!pset_prop_get(to_pset_node(fwnode), propname); + return false; +} + /** * fwnode_property_present - check if a property of a firmware node is present * @fwnode: Firmware node whose property to check @@ -131,12 +215,12 @@ EXPORT_SYMBOL_GPL(device_property_present); */ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) { - if (is_of_node(fwnode)) - return of_property_read_bool(to_of_node(fwnode), propname); - else if (is_acpi_node(fwnode)) - return !acpi_dev_prop_get(to_acpi_node(fwnode), propname, NULL); + bool ret; - return !!pset_prop_get(to_pset(fwnode), propname); + ret = __fwnode_property_present(fwnode, propname); + if (ret == false && fwnode && fwnode->secondary) + ret = __fwnode_property_present(fwnode->secondary, propname); + return ret; } EXPORT_SYMBOL_GPL(fwnode_property_present); @@ -287,25 +371,62 @@ int device_property_read_string(struct device *dev, const char *propname, } EXPORT_SYMBOL_GPL(device_property_read_string); -#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \ - (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \ +/** + * device_property_match_string - find a string in an array and return index + * @dev: Device to get the property of + * @propname: Name of the property holding the array + * @string: String to look for + * + * Find a given string in a string array and if it is found return the + * index back. + * + * Return: %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of strings, + * %-ENXIO if no suitable firmware interface is present. + */ +int device_property_match_string(struct device *dev, const char *propname, + const char *string) +{ + return fwnode_property_match_string(dev_fwnode(dev), propname, string); +} +EXPORT_SYMBOL_GPL(device_property_match_string); + +#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \ + (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \ : of_property_count_elems_of_size((node), (propname), sizeof(type)) -#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ -({ \ - int _ret_; \ - if (is_of_node(_fwnode_)) \ - _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ - _type_, _val_, _nval_); \ - else if (is_acpi_node(_fwnode_)) \ - _ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \ - _proptype_, _val_, _nval_); \ - else if (is_pset(_fwnode_)) \ - _ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \ - _proptype_, _val_, _nval_); \ - else \ - _ret_ = -ENXIO; \ - _ret_; \ +#define PSET_PROP_READ_ARRAY(node, propname, type, val, nval) \ + (val) ? 
pset_prop_read_##type##_array((node), (propname), (val), (nval)) \ + : pset_prop_count_elems_of_size((node), (propname), sizeof(type)) + +#define FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ +({ \ + int _ret_; \ + if (is_of_node(_fwnode_)) \ + _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ + _type_, _val_, _nval_); \ + else if (is_acpi_node(_fwnode_)) \ + _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \ + _val_, _nval_); \ + else if (is_pset_node(_fwnode_)) \ + _ret_ = PSET_PROP_READ_ARRAY(to_pset_node(_fwnode_), _propname_, \ + _type_, _val_, _nval_); \ + else \ + _ret_ = -ENXIO; \ + _ret_; \ +}) + +#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ +({ \ + int _ret_; \ + _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ + _val_, _nval_); \ + if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \ + _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ + _proptype_, _val_, _nval_); \ + _ret_; \ }) /** @@ -412,6 +533,41 @@ int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, } EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); +static int __fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + if (is_of_node(fwnode)) + return val ? + of_property_read_string_array(to_of_node(fwnode), + propname, val, nval) : + of_property_count_strings(to_of_node(fwnode), propname); + else if (is_acpi_node(fwnode)) + return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, + val, nval); + else if (is_pset_node(fwnode)) + return val ? + pset_prop_read_string_array(to_pset_node(fwnode), + propname, val, nval) : + pset_prop_count_elems_of_size(to_pset_node(fwnode), + propname, + sizeof(const char *)); + return -ENXIO; +} + +static int __fwnode_property_read_string(struct fwnode_handle *fwnode, + const char *propname, const char **val) +{ + if (is_of_node(fwnode)) + return of_property_read_string(to_of_node(fwnode), propname, val); + else if (is_acpi_node(fwnode)) + return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, + val, 1); + else if (is_pset_node(fwnode)) + return pset_prop_read_string(to_pset_node(fwnode), propname, val); + return -ENXIO; +} + /** * fwnode_property_read_string_array - return string array property of a node * @fwnode: Firmware node to get the property of @@ -434,18 +590,13 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { - if (is_of_node(fwnode)) - return val ? 
- of_property_read_string_array(to_of_node(fwnode), - propname, val, nval) : - of_property_count_strings(to_of_node(fwnode), propname); - else if (is_acpi_node(fwnode)) - return acpi_dev_prop_read(to_acpi_node(fwnode), propname, - DEV_PROP_STRING, val, nval); - else if (is_pset(fwnode)) - return pset_prop_read_array(to_pset(fwnode), propname, - DEV_PROP_STRING, val, nval); - return -ENXIO; + int ret; + + ret = __fwnode_property_read_string_array(fwnode, propname, val, nval); + if (ret == -EINVAL && fwnode && fwnode->secondary) + ret = __fwnode_property_read_string_array(fwnode->secondary, + propname, val, nval); + return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); @@ -467,18 +618,242 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val) { - if (is_of_node(fwnode)) - return of_property_read_string(to_of_node(fwnode), propname, val); - else if (is_acpi_node(fwnode)) - return acpi_dev_prop_read(to_acpi_node(fwnode), propname, - DEV_PROP_STRING, val, 1); + int ret; - return pset_prop_read_array(to_pset(fwnode), propname, - DEV_PROP_STRING, val, 1); + ret = __fwnode_property_read_string(fwnode, propname, val); + if (ret == -EINVAL && fwnode && fwnode->secondary) + ret = __fwnode_property_read_string(fwnode->secondary, + propname, val); + return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string); /** + * fwnode_property_match_string - find a string in an array and return index + * @fwnode: Firmware node to get the property of + * @propname: Name of the property holding the array + * @string: String to look for + * + * Find a given string in a string array and if it is found return the + * index back. + * + * Return: %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of strings, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_match_string(struct fwnode_handle *fwnode, + const char *propname, const char *string) +{ + const char **values; + int nval, ret, i; + + nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0); + if (nval < 0) + return nval; + + if (nval == 0) + return -ENODATA; + + values = kcalloc(nval, sizeof(*values), GFP_KERNEL); + if (!values) + return -ENOMEM; + + ret = fwnode_property_read_string_array(fwnode, propname, values, nval); + if (ret < 0) + goto out; + + ret = -ENODATA; + for (i = 0; i < nval; i++) { + if (!strcmp(values[i], string)) { + ret = i; + break; + } + } +out: + kfree(values); + return ret; +} +EXPORT_SYMBOL_GPL(fwnode_property_match_string); + +/** + * pset_free_set - releases memory allocated for copied property set + * @pset: Property set to release + * + * Function takes previously copied property set and releases all the + * memory allocated to it. 
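
A usage sketch for the new match helper (the property and string here are hypothetical):

	int idx;

	idx = fwnode_property_match_string(fwnode, "clock-names", "core");
	if (idx < 0)
		return idx;	/* no property, no value, or no match */
	/* "core" is element idx of the "clock-names" string array */
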
+ */ +static void pset_free_set(struct property_set *pset) +{ + const struct property_entry *prop; + size_t i, nval; + + if (!pset) + return; + + for (prop = pset->properties; prop->name; prop++) { + if (prop->is_array) { + if (prop->is_string && prop->pointer.str) { + nval = prop->length / sizeof(const char *); + for (i = 0; i < nval; i++) + kfree(prop->pointer.str[i]); + } + kfree(prop->pointer.raw_data); + } else if (prop->is_string) { + kfree(prop->value.str); + } + kfree(prop->name); + } + + kfree(pset->properties); + kfree(pset); +} + +static int pset_copy_entry(struct property_entry *dst, + const struct property_entry *src) +{ + const char **d, **s; + size_t i, nval; + + dst->name = kstrdup(src->name, GFP_KERNEL); + if (!dst->name) + return -ENOMEM; + + if (src->is_array) { + if (!src->length) + return -ENODATA; + + if (src->is_string) { + nval = src->length / sizeof(const char *); + dst->pointer.str = kcalloc(nval, sizeof(const char *), + GFP_KERNEL); + if (!dst->pointer.str) + return -ENOMEM; + + d = dst->pointer.str; + s = src->pointer.str; + for (i = 0; i < nval; i++) { + d[i] = kstrdup(s[i], GFP_KERNEL); + if (!d[i] && s[i]) + return -ENOMEM; + } + } else { + dst->pointer.raw_data = kmemdup(src->pointer.raw_data, + src->length, GFP_KERNEL); + if (!dst->pointer.raw_data) + return -ENOMEM; + } + } else if (src->is_string) { + dst->value.str = kstrdup(src->value.str, GFP_KERNEL); + if (!dst->value.str && src->value.str) + return -ENOMEM; + } else { + dst->value.raw_data = src->value.raw_data; + } + + dst->length = src->length; + dst->is_array = src->is_array; + dst->is_string = src->is_string; + + return 0; +} + +/** + * pset_copy_set - copies property set + * @pset: Property set to copy + * + * This function takes a deep copy of the given property set and returns + * pointer to the copy. Call device_free_property_set() to free resources + * allocated in this function. + * + * Return: Pointer to the new property set or error pointer. + */ +static struct property_set *pset_copy_set(const struct property_set *pset) +{ + const struct property_entry *entry; + struct property_set *p; + size_t i, n = 0; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + while (pset->properties[n].name) + n++; + + p->properties = kcalloc(n + 1, sizeof(*entry), GFP_KERNEL); + if (!p->properties) { + kfree(p); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < n; i++) { + int ret = pset_copy_entry(&p->properties[i], + &pset->properties[i]); + if (ret) { + pset_free_set(p); + return ERR_PTR(ret); + } + } + + return p; +} + +/** + * device_remove_property_set - Remove properties from a device object. + * @dev: Device whose properties to remove. + * + * The function removes properties previously associated to the device + * secondary firmware node with device_add_property_set(). Memory allocated + * to the properties will also be released. + */ +void device_remove_property_set(struct device *dev) +{ + struct fwnode_handle *fwnode; + + fwnode = dev_fwnode(dev); + if (!fwnode) + return; + /* + * Pick either primary or secondary node depending which one holds + * the pset. If there is no real firmware node (ACPI/DT) primary + * will hold the pset. + */ + if (!is_pset_node(fwnode)) + fwnode = fwnode->secondary; + if (!IS_ERR(fwnode) && is_pset_node(fwnode)) + pset_free_set(to_pset_node(fwnode)); + set_secondary_fwnode(dev, NULL); +} +EXPORT_SYMBOL_GPL(device_remove_property_set); + +/** + * device_add_property_set - Add a collection of properties to a device object. 
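
A build-and-attach sketch for the copying API (entries spelled out by hand rather than via any helper macros; a boolean-style entry needs no value):

	static struct property_entry demo_entries[] = {
		{ .name = "linux,demo-active-high" },	/* boolean-style */
		{ }			/* .name == NULL terminates the set */
	};

	static const struct property_set demo_pset = {
		.properties = demo_entries,
	};

	err = device_add_property_set(dev, &demo_pset);	/* takes a deep copy */
	/* ... on teardown: */
	device_remove_property_set(dev);	/* frees only the copy */
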
+ * @dev: Device to add properties to. + * @pset: Collection of properties to add. + * + * Associate a collection of device properties represented by @pset with @dev + * as its secondary firmware node. The function takes a copy of @pset. + */ +int device_add_property_set(struct device *dev, const struct property_set *pset) +{ + struct property_set *p; + + if (!pset) + return -EINVAL; + + p = pset_copy_set(pset); + if (IS_ERR(p)) + return PTR_ERR(p); + + p->fwnode.type = FWNODE_PDATA; + set_secondary_fwnode(dev, &p->fwnode); + return 0; +} +EXPORT_SYMBOL_GPL(device_add_property_set); + +/** * device_get_next_child_node - Return the next child node handle for a device * @dev: Device to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. @@ -493,11 +868,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, if (node) return &node->fwnode; } else if (IS_ENABLED(CONFIG_ACPI)) { - struct acpi_device *node; - - node = acpi_get_next_child(dev, to_acpi_node(child)); - if (node) - return acpi_fwnode_handle(node); + return acpi_get_next_subnode(dev, child); } return NULL; } @@ -534,18 +905,34 @@ unsigned int device_get_child_node_count(struct device *dev) } EXPORT_SYMBOL_GPL(device_get_child_node_count); -bool device_dma_is_coherent(struct device *dev) +bool device_dma_supported(struct device *dev) { - bool coherent = false; - + /* For DT, this is always supported. + * For ACPI, this depends on CCA, which + * is determined by the acpi_dma_supported(). + */ if (IS_ENABLED(CONFIG_OF) && dev->of_node) - coherent = of_dma_is_coherent(dev->of_node); - else - acpi_check_dma(ACPI_COMPANION(dev), &coherent); + return true; + + return acpi_dma_supported(ACPI_COMPANION(dev)); +} +EXPORT_SYMBOL_GPL(device_dma_supported); - return coherent; +enum dev_dma_attr device_get_dma_attr(struct device *dev) +{ + enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED; + + if (IS_ENABLED(CONFIG_OF) && dev->of_node) { + if (of_dma_is_coherent(dev->of_node)) + attr = DEV_DMA_COHERENT; + else + attr = DEV_DMA_NON_COHERENT; + } else + attr = acpi_get_dma_attr(ACPI_COMPANION(dev)); + + return attr; } -EXPORT_SYMBOL_GPL(device_dma_is_coherent); +EXPORT_SYMBOL_GPL(device_get_dma_attr); /** * device_get_phy_mode - Get phy mode for given device diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index cc557886ab23..3df977054781 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -59,6 +59,7 @@ struct regmap { regmap_lock lock; regmap_unlock unlock; void *lock_arg; /* This is passed to lock/unlock functions */ + gfp_t alloc_flags; struct device *dev; /* Device we do I/O on */ void *work_buf; /* Scratch buffer used to format I/O */ @@ -98,6 +99,8 @@ struct regmap { int (*reg_read)(void *context, unsigned int reg, unsigned int *val); int (*reg_write)(void *context, unsigned int reg, unsigned int val); + int (*reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); bool defer_caching; @@ -122,9 +125,9 @@ struct regmap { unsigned int num_reg_defaults_raw; /* if set, only the cache is modified not the HW */ - u32 cache_only; + bool cache_only; /* if set, only the HW is modified not the cache */ - u32 cache_bypass; + bool cache_bypass; /* if set, remember to free reg_defaults_raw */ bool cache_free; @@ -132,7 +135,7 @@ struct regmap { const void *reg_defaults_raw; void *cache; /* if set, the cache contains newer data than the HW */ - u32 cache_dirty; + bool cache_dirty; /* if set, the HW 
registers are known to match map->reg_defaults */ bool no_sync_defaults; diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c index 0246f44ded74..686c9e0b930e 100644 --- a/drivers/base/regmap/regcache-flat.c +++ b/drivers/base/regmap/regcache-flat.c @@ -21,7 +21,7 @@ static int regcache_flat_init(struct regmap *map) int i; unsigned int *cache; - map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1), + map->cache = kcalloc(map->max_register + 1, sizeof(unsigned int), GFP_KERNEL); if (!map->cache) return -ENOMEM; diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c index 2d53f6f138e1..6f77d7319fc6 100644 --- a/drivers/base/regmap/regcache-lzo.c +++ b/drivers/base/regmap/regcache-lzo.c @@ -139,7 +139,7 @@ static int regcache_lzo_init(struct regmap *map) ret = 0; blkcount = regcache_lzo_block_count(map); - map->cache = kzalloc(blkcount * sizeof *lzo_blocks, + map->cache = kcalloc(blkcount, sizeof(*lzo_blocks), GFP_KERNEL); if (!map->cache) return -ENOMEM; @@ -152,8 +152,8 @@ static int regcache_lzo_init(struct regmap *map) * that register. */ bmp_size = map->num_reg_defaults_raw; - sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long), - GFP_KERNEL); + sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long), + GFP_KERNEL); if (!sync_bmp) { ret = -ENOMEM; goto err; @@ -355,9 +355,9 @@ static int regcache_lzo_sync(struct regmap *map, unsigned int min, if (ret > 0 && val == map->reg_defaults[ret].def) continue; - map->cache_bypass = 1; + map->cache_bypass = true; ret = _regmap_write(map, i, val); - map->cache_bypass = 0; + map->cache_bypass = false; if (ret) return ret; dev_dbg(map->dev, "Synced register %#x, value %#x\n", diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 56486d92c4e7..aa56af87d941 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -361,13 +361,14 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) rbnode->base_reg = reg; } - rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, - GFP_KERNEL); + rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, + GFP_KERNEL); if (!rbnode->block) goto err_free; - rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) * - sizeof(*rbnode->cache_present), GFP_KERNEL); + rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), + sizeof(*rbnode->cache_present), + GFP_KERNEL); if (!rbnode->cache_present) goto err_free_block; @@ -413,8 +414,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, max = reg + max_dist; /* look for an adjacent register to the one we are about to add */ - for (node = rb_first(&rbtree_ctx->root); node; - node = rb_next(node)) { + node = rbtree_ctx->root.rb_node; + while (node) { rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node); @@ -425,6 +426,11 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, new_base_reg = min(reg, base_reg); new_top_reg = max(reg, top_reg); } else { + if (max < base_reg) + node = node->rb_left; + else + node = node->rb_right; + continue; } diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 6f8a13ec32a4..348be3a35410 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -54,11 +54,11 @@ static int regcache_hw_init(struct regmap *map) return -ENOMEM; if (!map->reg_defaults_raw) { - u32 cache_bypass = map->cache_bypass; + bool cache_bypass = 
map->cache_bypass; dev_warn(map->dev, "No cache defaults, reading back from HW\n"); /* Bypass the cache access till data read from HW*/ - map->cache_bypass = 1; + map->cache_bypass = true; tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); if (!tmp_buf) { ret = -ENOMEM; @@ -100,15 +100,25 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) int i; void *tmp_buf; - for (i = 0; i < config->num_reg_defaults; i++) - if (config->reg_defaults[i].reg % map->reg_stride) - return -EINVAL; - if (map->cache_type == REGCACHE_NONE) { + if (config->reg_defaults || config->num_reg_defaults_raw) + dev_warn(map->dev, + "No cache used with register defaults set!\n"); + map->cache_bypass = true; return 0; } + if (config->reg_defaults && !config->num_reg_defaults) { + dev_err(map->dev, + "Register defaults are set without the number!\n"); + return -EINVAL; + } + + for (i = 0; i < config->num_reg_defaults; i++) + if (config->reg_defaults[i].reg % map->reg_stride) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(cache_types); i++) if (cache_types[i]->type == map->cache_type) break; @@ -138,8 +148,6 @@ int regcache_init(struct regmap *map, const struct regmap_config *config) * a copy of it. */ if (config->reg_defaults) { - if (!map->num_reg_defaults) - return -EINVAL; tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * sizeof(struct reg_default), GFP_KERNEL); if (!tmp_buf) @@ -285,9 +293,9 @@ static int regcache_default_sync(struct regmap *map, unsigned int min, if (!regcache_reg_needs_sync(map, reg, val)) continue; - map->cache_bypass = 1; + map->cache_bypass = true; ret = _regmap_write(map, reg, val); - map->cache_bypass = 0; + map->cache_bypass = false; if (ret) { dev_err(map->dev, "Unable to sync register %#x. %d\n", reg, ret); @@ -315,7 +323,7 @@ int regcache_sync(struct regmap *map) int ret = 0; unsigned int i; const char *name; - unsigned int bypass; + bool bypass; BUG_ON(!map->cache_ops); @@ -333,7 +341,7 @@ int regcache_sync(struct regmap *map) map->async = true; /* Apply any patch first */ - map->cache_bypass = 1; + map->cache_bypass = true; for (i = 0; i < map->patch_regs; i++) { ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); if (ret != 0) { @@ -342,7 +350,7 @@ int regcache_sync(struct regmap *map) goto out; } } - map->cache_bypass = 0; + map->cache_bypass = false; if (map->cache_ops->sync) ret = map->cache_ops->sync(map, 0, map->max_register); @@ -384,7 +392,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, { int ret = 0; const char *name; - unsigned int bypass; + bool bypass; BUG_ON(!map->cache_ops); @@ -535,19 +543,30 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, switch (map->cache_word_size) { case 1: { u8 *cache = base; + cache[idx] = val; break; } case 2: { u16 *cache = base; + cache[idx] = val; break; } case 4: { u32 *cache = base; + + cache[idx] = val; + break; + } +#ifdef CONFIG_64BIT + case 8: { + u64 *cache = base; + cache[idx] = val; break; } +#endif default: BUG(); } @@ -568,16 +587,26 @@ unsigned int regcache_get_val(struct regmap *map, const void *base, switch (map->cache_word_size) { case 1: { const u8 *cache = base; + return cache[idx]; } case 2: { const u16 *cache = base; + return cache[idx]; } case 4: { const u32 *cache = base; + return cache[idx]; } +#ifdef CONFIG_64BIT + case 8: { + const u64 *cache = base; + + return cache[idx]; + } +#endif default: BUG(); } @@ -637,11 +666,11 @@ static int regcache_sync_block_single(struct regmap *map, void *block, if (!regcache_reg_needs_sync(map, 
regtmp, val)) continue; - map->cache_bypass = 1; + map->cache_bypass = true; ret = _regmap_write(map, regtmp, val); - map->cache_bypass = 0; + map->cache_bypass = false; if (ret != 0) { dev_err(map->dev, "Unable to sync register %#x. %d\n", regtmp, ret); @@ -668,14 +697,14 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", count * val_bytes, count, base, cur - map->reg_stride); - map->cache_bypass = 1; + map->cache_bypass = true; ret = _regmap_raw_write(map, base, *data, count * val_bytes); if (ret) dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", base, cur - map->reg_stride, ret); - map->cache_bypass = 0; + map->cache_bypass = false; *data = NULL; diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 4c55cfbad19e..1ee3d40861c7 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -30,7 +30,7 @@ static LIST_HEAD(regmap_debugfs_early_list); static DEFINE_MUTEX(regmap_debugfs_early_lock); /* Calculate the length of a fixed format */ -static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) +static size_t regmap_calc_reg_len(int max_val) { return snprintf(NULL, 0, "%x", max_val); } @@ -173,8 +173,7 @@ static inline void regmap_calc_tot_len(struct regmap *map, { /* Calculate the length of a fixed format */ if (!map->debugfs_tot_len) { - map->debugfs_reg_len = regmap_calc_reg_len(map->max_register, - buf, count); + map->debugfs_reg_len = regmap_calc_reg_len(map->max_register), map->debugfs_val_len = 2 * map->format.val_bytes; map->debugfs_tot_len = map->debugfs_reg_len + map->debugfs_val_len + 3; /* : \n */ @@ -338,6 +337,7 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file, char *buf; char *entry; int ret; + unsigned entry_len; if (*ppos < 0 || !count) return -EINVAL; @@ -365,18 +365,15 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file, p = 0; mutex_lock(&map->cache_lock); list_for_each_entry(c, &map->debugfs_off_cache, list) { - snprintf(entry, PAGE_SIZE, "%x-%x", - c->base_reg, c->max_reg); + entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n", + c->base_reg, c->max_reg); if (p >= *ppos) { - if (buf_pos + 1 + strlen(entry) > count) + if (buf_pos + entry_len > count) break; - snprintf(buf + buf_pos, count - buf_pos, - "%s", entry); - buf_pos += strlen(entry); - buf[buf_pos] = '\n'; - buf_pos++; + memcpy(buf + buf_pos, entry, entry_len); + buf_pos += entry_len; } - p += strlen(entry) + 1; + p += entry_len; } mutex_unlock(&map->cache_lock); @@ -400,72 +397,39 @@ static const struct file_operations regmap_reg_ranges_fops = { .llseek = default_llseek, }; -static ssize_t regmap_access_read_file(struct file *file, - char __user *user_buf, size_t count, - loff_t *ppos) +static int regmap_access_show(struct seq_file *s, void *ignored) { - int reg_len, tot_len; - size_t buf_pos = 0; - loff_t p = 0; - ssize_t ret; - int i; - struct regmap *map = file->private_data; - char *buf; - - if (*ppos < 0 || !count) - return -EINVAL; + struct regmap *map = s->private; + int i, reg_len; - buf = kmalloc(count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - /* Calculate the length of a fixed format */ - reg_len = regmap_calc_reg_len(map->max_register, buf, count); - tot_len = reg_len + 10; /* ': R W V P\n' */ + reg_len = regmap_calc_reg_len(map->max_register); for (i = 0; i <= map->max_register; i += map->reg_stride) { /* Ignore registers which are neither readable nor writable 
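
The new regcache_init() sanity checks above pair register defaults with an explicit count; a config sketch that passes them (names hypothetical, fields are the usual regmap_config members):

	static const struct reg_default demo_defaults[] = {
		{ .reg = 0x00, .def = 0x0001 },
		{ .reg = 0x04, .def = 0x00ff },
	};

	static const struct regmap_config demo_config = {
		.reg_bits	= 8,
		.val_bits	= 16,
		.max_register	= 0x10,
		.cache_type	= REGCACHE_RBTREE,
		.reg_defaults	= demo_defaults,
		.num_reg_defaults = ARRAY_SIZE(demo_defaults),
	};
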
*/ if (!regmap_readable(map, i) && !regmap_writeable(map, i)) continue; - /* If we're in the region the user is trying to read */ - if (p >= *ppos) { - /* ...but not beyond it */ - if (buf_pos + tot_len + 1 >= count) - break; - - /* Format the register */ - snprintf(buf + buf_pos, count - buf_pos, - "%.*x: %c %c %c %c\n", - reg_len, i, - regmap_readable(map, i) ? 'y' : 'n', - regmap_writeable(map, i) ? 'y' : 'n', - regmap_volatile(map, i) ? 'y' : 'n', - regmap_precious(map, i) ? 'y' : 'n'); - - buf_pos += tot_len; - } - p += tot_len; - } - - ret = buf_pos; - - if (copy_to_user(user_buf, buf, buf_pos)) { - ret = -EFAULT; - goto out; + /* Format the register */ + seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i, + regmap_readable(map, i) ? 'y' : 'n', + regmap_writeable(map, i) ? 'y' : 'n', + regmap_volatile(map, i) ? 'y' : 'n', + regmap_precious(map, i) ? 'y' : 'n'); } - *ppos += buf_pos; + return 0; +} -out: - kfree(buf); - return ret; +static int access_open(struct inode *inode, struct file *file) +{ + return single_open(file, regmap_access_show, inode->i_private); } static const struct file_operations regmap_access_fops = { - .open = simple_open, - .read = regmap_access_read_file, - .llseek = default_llseek, + .open = access_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, }; static ssize_t regmap_cache_only_write_file(struct file *file, diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 38d1f72d869c..9b0d202414d0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -39,8 +39,11 @@ struct regmap_irq_chip_data { unsigned int *mask_buf; unsigned int *mask_buf_def; unsigned int *wake_buf; + unsigned int *type_buf; + unsigned int *type_buf_def; unsigned int irq_reg_stride; + unsigned int type_reg_stride; }; static inline const @@ -63,6 +66,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data) struct regmap *map = d->map; int i, ret; u32 reg; + u32 unmask_offset; if (d->chip->runtime_pm) { ret = pm_runtime_get_sync(map->dev); @@ -79,12 +83,28 @@ static void regmap_irq_sync_unlock(struct irq_data *data) for (i = 0; i < d->chip->num_regs; i++) { reg = d->chip->mask_base + (i * map->reg_stride * d->irq_reg_stride); - if (d->chip->mask_invert) + if (d->chip->mask_invert) { ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], ~d->mask_buf[i]); - else + } else if (d->chip->unmask_base) { + /* set mask with mask_base register */ + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], ~d->mask_buf[i]); + if (ret < 0) + dev_err(d->map->dev, + "Failed to sync unmasks in %x\n", + reg); + unmask_offset = d->chip->unmask_base - + d->chip->mask_base; + /* clear mask with unmask_base register */ + ret = regmap_update_bits(d->map, + reg + unmask_offset, + d->mask_buf_def[i], + d->mask_buf[i]); + } else { ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], d->mask_buf[i]); + } if (ret != 0) dev_err(d->map->dev, "Failed to sync masks in %x\n", reg); @@ -116,13 +136,33 @@ static void regmap_irq_sync_unlock(struct irq_data *data) if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { reg = d->chip->ack_base + (i * map->reg_stride * d->irq_reg_stride); - ret = regmap_write(map, reg, d->mask_buf[i]); + /* some chips ack by write 0 */ + if (d->chip->ack_invert) + ret = regmap_write(map, reg, ~d->mask_buf[i]); + else + ret = regmap_write(map, reg, d->mask_buf[i]); if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", reg, ret); } } + for (i = 0; i < d->chip->num_type_reg; 
i++) { + if (!d->type_buf_def[i]) + continue; + reg = d->chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (d->chip->type_invert) + ret = regmap_update_bits(d->map, reg, + d->type_buf_def[i], ~d->type_buf[i]); + else + ret = regmap_update_bits(d->map, reg, + d->type_buf_def[i], d->type_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to sync type in %x\n", + reg); + } + if (d->chip->runtime_pm) pm_runtime_put(map->dev); @@ -157,6 +197,38 @@ static void regmap_irq_disable(struct irq_data *data) d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; } +static int regmap_irq_set_type(struct irq_data *data, unsigned int type) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + int reg = irq_data->type_reg_offset / map->reg_stride; + + if (!(irq_data->type_rising_mask | irq_data->type_falling_mask)) + return 0; + + d->type_buf[reg] &= ~(irq_data->type_falling_mask | + irq_data->type_rising_mask); + switch (type) { + case IRQ_TYPE_EDGE_FALLING: + d->type_buf[reg] |= irq_data->type_falling_mask; + break; + + case IRQ_TYPE_EDGE_RISING: + d->type_buf[reg] |= irq_data->type_rising_mask; + break; + + case IRQ_TYPE_EDGE_BOTH: + d->type_buf[reg] |= (irq_data->type_falling_mask | + irq_data->type_rising_mask); + break; + + default: + return -EINVAL; + } + return 0; +} + static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) { struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); @@ -183,6 +255,7 @@ static const struct irq_chip regmap_irq_chip = { .irq_bus_sync_unlock = regmap_irq_sync_unlock, .irq_disable = regmap_irq_disable, .irq_enable = regmap_irq_enable, + .irq_set_type = regmap_irq_set_type, .irq_set_wake = regmap_irq_set_wake, }; @@ -339,6 +412,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int i; int ret = -ENOMEM; u32 reg; + u32 unmask_offset; if (chip->num_regs <= 0) return -EINVAL; @@ -364,28 +438,40 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; - d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->status_buf) goto err_alloc; - d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->mask_buf) goto err_alloc; - d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->mask_buf_def) goto err_alloc; if (chip->wake_base) { - d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->wake_buf) goto err_alloc; } + if (chip->num_type_reg) { + d->type_buf_def = kcalloc(chip->num_type_reg, + sizeof(unsigned int), GFP_KERNEL); + if (!d->type_buf_def) + goto err_alloc; + + d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int), + GFP_KERNEL); + if (!d->type_buf) + goto err_alloc; + } + d->irq_chip = regmap_irq_chip; d->irq_chip.name = chip->name; d->irq = irq; @@ -398,10 +484,16 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, else d->irq_reg_stride = 1; + if (chip->type_reg_stride) + d->type_reg_stride = chip->type_reg_stride; + else + d->type_reg_stride = 1; + if (!map->use_single_read && map->reg_stride == 1 && d->irq_reg_stride == 1) { - 
d->status_reg_buf = kmalloc(map->format.val_bytes * - chip->num_regs, GFP_KERNEL); + d->status_reg_buf = kmalloc_array(chip->num_regs, + map->format.val_bytes, + GFP_KERNEL); if (!d->status_reg_buf) goto err_alloc; } @@ -420,7 +512,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (chip->mask_invert) ret = regmap_update_bits(map, reg, d->mask_buf[i], ~d->mask_buf[i]); - else + else if (d->chip->unmask_base) { + unmask_offset = d->chip->unmask_base - + d->chip->mask_base; + ret = regmap_update_bits(d->map, + reg + unmask_offset, + d->mask_buf[i], + d->mask_buf[i]); + } else ret = regmap_update_bits(map, reg, d->mask_buf[i], d->mask_buf[i]); if (ret != 0) { @@ -445,7 +544,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { reg = chip->ack_base + (i * map->reg_stride * d->irq_reg_stride); - ret = regmap_write(map, reg, + if (chip->ack_invert) + ret = regmap_write(map, reg, + ~(d->status_buf[i] & d->mask_buf[i])); + else + ret = regmap_write(map, reg, d->status_buf[i] & d->mask_buf[i]); if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -478,6 +581,33 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, } } + if (chip->num_type_reg) { + for (i = 0; i < chip->num_irqs; i++) { + reg = chip->irqs[i].type_reg_offset / map->reg_stride; + d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask | + chip->irqs[i].type_falling_mask; + } + for (i = 0; i < chip->num_type_reg; ++i) { + if (!d->type_buf_def[i]) + continue; + + reg = chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (chip->type_invert) + ret = regmap_update_bits(map, reg, + d->type_buf_def[i], 0xFF); + else + ret = regmap_update_bits(map, reg, + d->type_buf_def[i], 0x0); + if (ret != 0) { + dev_err(map->dev, + "Failed to set type in 0x%x: %x\n", + reg, ret); + goto err_alloc; + } + } + } + if (irq_base) d->domain = irq_domain_add_legacy(map->dev->of_node, chip->num_irqs, irq_base, 0, @@ -508,6 +638,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, err_domain: /* Should really dispose of the domain but... 
*/ err_alloc: + kfree(d->type_buf); + kfree(d->type_buf_def); kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); @@ -531,6 +663,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) free_irq(irq, d); irq_domain_remove(d->domain); + kfree(d->type_buf); + kfree(d->type_buf_def); kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 426a57e41ac7..eea51569f0eb 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -61,6 +61,33 @@ static int regmap_mmio_regbits_check(size_t reg_bits) } } +static int regmap_mmio_get_min_stride(size_t val_bits) +{ + int min_stride; + + switch (val_bits) { + case 8: + /* The core treats 0 as 1 */ + min_stride = 0; + return 0; + case 16: + min_stride = 2; + break; + case 32: + min_stride = 4; + break; +#ifdef CONFIG_64BIT + case 64: + min_stride = 8; + break; +#endif + default: + return -EINVAL; + } + + return min_stride; +} + static inline void regmap_mmio_count_check(size_t count, u32 offset) { BUG_ON(count <= offset); @@ -231,26 +258,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, if (config->pad_bits) return ERR_PTR(-EINVAL); - switch (config->val_bits) { - case 8: - /* The core treats 0 as 1 */ - min_stride = 0; - break; - case 16: - min_stride = 2; - break; - case 32: - min_stride = 4; - break; -#ifdef CONFIG_64BIT - case 64: - min_stride = 8; - break; -#endif - break; - default: - return ERR_PTR(-EINVAL); - } + min_stride = regmap_mmio_get_min_stride(config->val_bits); + if (min_stride < 0) + return ERR_PTR(min_stride); if (config->reg_stride < min_stride) return ERR_PTR(-EINVAL); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index afaf56200674..ee54e841de4a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -245,6 +245,28 @@ static void regmap_format_32_native(void *buf, unsigned int val, *(u32 *)buf = val << shift; } +#ifdef CONFIG_64BIT +static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) +{ + __be64 *b = buf; + + b[0] = cpu_to_be64((u64)val << shift); +} + +static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) +{ + __le64 *b = buf; + + b[0] = cpu_to_le64((u64)val << shift); +} + +static void regmap_format_64_native(void *buf, unsigned int val, + unsigned int shift) +{ + *(u64 *)buf = (u64)val << shift; +} +#endif + static void regmap_parse_inplace_noop(void *buf) { } @@ -332,6 +354,41 @@ static unsigned int regmap_parse_32_native(const void *buf) return *(u32 *)buf; } +#ifdef CONFIG_64BIT +static unsigned int regmap_parse_64_be(const void *buf) +{ + const __be64 *b = buf; + + return be64_to_cpu(b[0]); +} + +static unsigned int regmap_parse_64_le(const void *buf) +{ + const __le64 *b = buf; + + return le64_to_cpu(b[0]); +} + +static void regmap_parse_64_be_inplace(void *buf) +{ + __be64 *b = buf; + + b[0] = be64_to_cpu(b[0]); +} + +static void regmap_parse_64_le_inplace(void *buf) +{ + __le64 *b = buf; + + b[0] = le64_to_cpu(b[0]); +} + +static unsigned int regmap_parse_64_native(const void *buf) +{ + return *(u64 *)buf; +} +#endif + static void regmap_lock_mutex(void *__map) { struct regmap *map = __map; @@ -561,6 +618,16 @@ struct regmap *__regmap_init(struct device *dev, } map->lock_arg = map; } + + /* + * When we write in fast-paths with regmap_bulk_write() don't allocate + * scratch buffers with sleeping allocations. 
+ */ + if ((bus && bus->fast_io) || config->fast_io) + map->alloc_flags = GFP_ATOMIC; + else + map->alloc_flags = GFP_KERNEL; + map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); map->format.pad_bytes = config->pad_bits / 8; map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); @@ -619,6 +686,7 @@ struct regmap *__regmap_init(struct device *dev, goto skip_format_initialization; } else { map->reg_read = _regmap_bus_read; + map->reg_update_bits = bus->reg_update_bits; } reg_endian = regmap_get_reg_endian(bus, config); @@ -701,6 +769,21 @@ struct regmap *__regmap_init(struct device *dev, } break; +#ifdef CONFIG_64BIT + case 64: + switch (reg_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_reg = regmap_format_64_be; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_reg = regmap_format_64_native; + break; + default: + goto err_map; + } + break; +#endif + default: goto err_map; } @@ -760,6 +843,28 @@ struct regmap *__regmap_init(struct device *dev, goto err_map; } break; +#ifdef CONFIG_64BIT + case 64: + switch (val_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_val = regmap_format_64_be; + map->format.parse_val = regmap_parse_64_be; + map->format.parse_inplace = regmap_parse_64_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_64_le; + map->format.parse_val = regmap_parse_64_le; + map->format.parse_inplace = regmap_parse_64_le_inplace; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_val = regmap_format_64_native; + map->format.parse_val = regmap_parse_64_native; + break; + default: + goto err_map; + } + break; +#endif } if (map->format.format_write) { @@ -1502,7 +1607,7 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) { int ret; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -1529,7 +1634,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) { int ret; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -1703,7 +1808,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, if (map->bus && !map->format.parse_inplace) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; /* @@ -1786,7 +1891,7 @@ out: if (!val_count) return -EINVAL; - wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); + wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); if (!wval) { dev_err(map->dev, "Error in memory allocation\n"); return -ENOMEM; @@ -1972,7 +2077,7 @@ static int _regmap_multi_reg_write(struct regmap *map, int reg = regs[i].reg; if (!map->writeable_reg(map->dev, reg)) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; } @@ -2122,7 +2227,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, if (val_len % map->format.val_bytes) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -2249,7 +2354,7 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) { int ret; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; map->lock(map->lock_arg); @@ -2285,7 +2390,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, return -EINVAL; if (val_len % map->format.val_bytes) return -EINVAL; - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, 
map->reg_stride)) return -EINVAL; if (val_count == 0) return -EINVAL; @@ -2403,7 +2508,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_bytes = map->format.val_bytes; bool vol = regmap_volatile_range(map, reg, val_count); - if (reg % map->reg_stride) + if (!IS_ALIGNED(reg, map->reg_stride)) return -EINVAL; if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { @@ -2477,11 +2582,19 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, * we assume that the values are native * endian. */ +#ifdef CONFIG_64BIT + u64 *u64 = val; +#endif u32 *u32 = val; u16 *u16 = val; u8 *u8 = val; switch (map->format.val_bytes) { +#ifdef CONFIG_64BIT + case 8: + u64[i] = ival; + break; +#endif case 4: u32[i] = ival; break; @@ -2509,20 +2622,26 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, int ret; unsigned int tmp, orig; - ret = _regmap_read(map, reg, &orig); - if (ret != 0) - return ret; + if (change) + *change = false; - tmp = orig & ~mask; - tmp |= val & mask; - - if (force_write || (tmp != orig)) { - ret = _regmap_write(map, reg, tmp); - if (change) + if (regmap_volatile(map, reg) && map->reg_update_bits) { + ret = map->reg_update_bits(map->bus_context, reg, mask, val); + if (ret == 0 && change) *change = true; } else { - if (change) - *change = false; + ret = _regmap_read(map, reg, &orig); + if (ret != 0) + return ret; + + tmp = orig & ~mask; + tmp |= val & mask; + + if (force_write || (tmp != orig)) { + ret = _regmap_write(map, reg, tmp); + if (ret == 0 && change) + *change = true; + } } return ret; diff --git a/drivers/base/soc.c b/drivers/base/soc.c index 39fca01c8fa1..75b98aad6faf 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -16,7 +16,6 @@ #include <linux/err.h> static DEFINE_IDA(soc_ida); -static DEFINE_SPINLOCK(soc_lock); static ssize_t soc_info_get(struct device *dev, struct device_attribute *attr, @@ -122,20 +121,10 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr } /* Fetch a unique (reclaimable) SOC ID. */ - do { - if (!ida_pre_get(&soc_ida, GFP_KERNEL)) { - ret = -ENOMEM; - goto out2; - } - - spin_lock(&soc_lock); - ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num); - spin_unlock(&soc_lock); - - } while (ret == -EAGAIN); - - if (ret) + ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL); + if (ret < 0) goto out2; + soc_dev->soc_dev_num = ret; soc_dev->attr = soc_dev_attr; soc_dev->dev.bus = &soc_bus_type; @@ -151,7 +140,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr return soc_dev; out3: - ida_remove(&soc_ida, soc_dev->soc_dev_num); + ida_simple_remove(&soc_ida, soc_dev->soc_dev_num); out2: kfree(soc_dev); out1: @@ -161,7 +150,7 @@ out1: /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ void soc_device_unregister(struct soc_device *soc_dev) { - ida_remove(&soc_ida, soc_dev->soc_dev_num); + ida_simple_remove(&soc_ida, soc_dev->soc_dev_num); device_unregister(&soc_dev->dev); } |
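
The regmap-irq changes above add edge-type configuration: regmap_irq_set_type() records the requested edge in type_buf[] using the per-IRQ type_rising_mask/type_falling_mask, and the result is flushed to the chip's type registers when the bus lock is released (inverted first if type_invert is set). As a rough sketch of how a chip driver might opt in, the structures below use only fields visible in this series plus long-standing regmap_irq_chip ones; the chip name, addresses and masks are hypothetical:

static const struct regmap_irq example_irqs[] = {
	{
		.reg_offset	   = 0x0,
		.mask		   = BIT(0),
		/* New fields: which type_buf[] word and bits this IRQ owns. */
		.type_reg_offset   = 0x0,
		.type_rising_mask  = BIT(0),
		.type_falling_mask = BIT(1),
	},
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		 = "example",
	.irqs		 = example_irqs,
	.num_irqs	 = ARRAY_SIZE(example_irqs),
	.num_regs	 = 1,
	.status_base	 = 0x10,
	.mask_base	 = 0x20,
	.ack_base	 = 0x30,
	/* New fields: one type register, written on irq_bus_sync_unlock. */
	.type_base	 = 0x40,
	.num_type_reg	 = 1,
	.type_reg_stride = 1,
};

A child device requesting its interrupt with IRQF_TRIGGER_FALLING then reaches regmap_irq_set_type(), which sets BIT(1) in type_buf[0]; level triggers fall through to the default case and are rejected with -EINVAL.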
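
_regmap_update_bits() above also grows a fast path: when a register is volatile and the bus provides reg_update_bits(), the read-modify-write round trip is delegated to the bus in a single call (the hook is copied into map->reg_update_bits in __regmap_init()). A minimal sketch of a bus opting in, with everything named example_* being hypothetical:

/* Hypothetical context for a memory-mapped device window. */
struct example_hw {
	void __iomem *base;
};

static int example_bus_update_bits(void *context, unsigned int reg,
				   unsigned int mask, unsigned int val)
{
	struct example_hw *hw = context;
	u32 tmp = readl(hw->base + reg);

	/* A real bus would issue a native set/clear-bits command here. */
	writel((tmp & ~mask) | (val & mask), hw->base + reg);
	return 0;
}

static const struct regmap_bus example_bus = {
	.fast_io	 = true,	/* with this diff, also selects GFP_ATOMIC scratch buffers */
	.reg_update_bits = example_bus_update_bits,
};

Note that the core only takes this path for registers regmap_volatile() reports as uncached; for cached registers the read half is served from the cache anyway, so the callback would gain nothing there.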
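
The regmap_format_64_*/regmap_parse_64_* helpers make 64-bit values (and, for big-endian or native layouts, 64-bit register addresses) usable on CONFIG_64BIT kernels, with regmap-mmio's new regmap_mmio_get_min_stride() enforcing a stride of 8 for them. A hypothetical device with 64-bit data registers could then be described with standard regmap_config fields:

static const struct regmap_config example_64bit_config = {
	.reg_bits	   = 32,
	.val_bits	   = 64,	/* only accepted under CONFIG_64BIT */
	.reg_stride	   = 8,		/* min_stride for 64-bit values */
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};

regmap_bulk_read() picks the matching case in its val_bytes switch and stores each parsed value through the new u64 pointer.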