Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/core.c                    | 130
-rw-r--r--  drivers/base/cpu.c                     | 101
-rw-r--r--  drivers/base/memory.c                  | 114
-rw-r--r--  drivers/base/pinctrl.c                 |  19
-rw-r--r--  drivers/base/platform.c                |   1
-rw-r--r--  drivers/base/power/domain.c            |   1
-rw-r--r--  drivers/base/power/generic_ops.c       |  23
-rw-r--r--  drivers/base/power/opp.c               |   4
-rw-r--r--  drivers/base/power/qos.c               |   6
-rw-r--r--  drivers/base/power/runtime.c           |  12
-rw-r--r--  drivers/base/power/wakeup.c            |   9
-rw-r--r--  drivers/base/regmap/internal.h         |  10
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c  |  62
-rw-r--r--  drivers/base/regmap/regcache.c         |  83
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c   |   8
-rw-r--r--  drivers/base/regmap/regmap.c           | 156
16 files changed, 575 insertions, 164 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c index 6fdc53d46fa0..dc3ea237f086 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -403,6 +403,36 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, static struct device_attribute uevent_attr = __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent); +static ssize_t show_online(struct device *dev, struct device_attribute *attr, + char *buf) +{ + bool val; + + lock_device_hotplug(); + val = !dev->offline; + unlock_device_hotplug(); + return sprintf(buf, "%u\n", val); +} + +static ssize_t store_online(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + bool val; + int ret; + + ret = strtobool(buf, &val); + if (ret < 0) + return ret; + + lock_device_hotplug(); + ret = val ? device_online(dev) : device_offline(dev); + unlock_device_hotplug(); + return ret < 0 ? ret : count; +} + +static struct device_attribute online_attr = + __ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online); + static int device_add_attributes(struct device *dev, struct device_attribute *attrs) { @@ -516,6 +546,12 @@ static int device_add_attrs(struct device *dev) if (error) goto err_remove_type_groups; + if (device_supports_offline(dev) && !dev->offline_disabled) { + error = device_create_file(dev, &online_attr); + if (error) + goto err_remove_type_groups; + } + return 0; err_remove_type_groups: @@ -536,6 +572,7 @@ static void device_remove_attrs(struct device *dev) struct class *class = dev->class; const struct device_type *type = dev->type; + device_remove_file(dev, &online_attr); device_remove_groups(dev, dev->groups); if (type) @@ -1435,6 +1472,99 @@ EXPORT_SYMBOL_GPL(put_device); EXPORT_SYMBOL_GPL(device_create_file); EXPORT_SYMBOL_GPL(device_remove_file); +static DEFINE_MUTEX(device_hotplug_lock); + +void lock_device_hotplug(void) +{ + mutex_lock(&device_hotplug_lock); +} + +void unlock_device_hotplug(void) +{ + mutex_unlock(&device_hotplug_lock); +} + +static int device_check_offline(struct device *dev, void *not_used) +{ + int ret; + + ret = device_for_each_child(dev, NULL, device_check_offline); + if (ret) + return ret; + + return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; +} + +/** + * device_offline - Prepare the device for hot-removal. + * @dev: Device to be put offline. + * + * Execute the device bus type's .offline() callback, if present, to prepare + * the device for a subsequent hot-removal. If that succeeds, the device must + * not be used until either it is removed or its bus type's .online() callback + * is executed. + * + * Call under device_hotplug_lock. + */ +int device_offline(struct device *dev) +{ + int ret; + + if (dev->offline_disabled) + return -EPERM; + + ret = device_for_each_child(dev, NULL, device_check_offline); + if (ret) + return ret; + + device_lock(dev); + if (device_supports_offline(dev)) { + if (dev->offline) { + ret = 1; + } else { + ret = dev->bus->offline(dev); + if (!ret) { + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); + dev->offline = true; + } + } + } + device_unlock(dev); + + return ret; +} + +/** + * device_online - Put the device back online after successful device_offline(). + * @dev: Device to be put back online. + * + * If device_offline() has been successfully executed for @dev, but the device + * has not been removed subsequently, execute its bus type's .online() callback + * to indicate that the device can be used again. + * + * Call under device_hotplug_lock. 
+ */ +int device_online(struct device *dev) +{ + int ret = 0; + + device_lock(dev); + if (device_supports_offline(dev)) { + if (dev->offline) { + ret = dev->bus->online(dev); + if (!ret) { + kobject_uevent(&dev->kobj, KOBJ_ONLINE); + dev->offline = false; + } + } else { + ret = 1; + } + } + device_unlock(dev); + + return ret; +} + struct root_device { struct device dev; struct module *owner; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index c377673320ed..a16d20e389f0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -13,17 +13,21 @@ #include <linux/gfp.h> #include <linux/slab.h> #include <linux/percpu.h> +#include <linux/acpi.h> #include "base.h" -struct bus_type cpu_subsys = { - .name = "cpu", - .dev_name = "cpu", -}; -EXPORT_SYMBOL_GPL(cpu_subsys); - static DEFINE_PER_CPU(struct device *, cpu_sys_devices); +static int cpu_subsys_match(struct device *dev, struct device_driver *drv) +{ + /* ACPI style match is the only one that may succeed. */ + if (acpi_driver_match_device(dev, drv)) + return 1; + + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU static void change_cpu_under_node(struct cpu *cpu, unsigned int from_nid, unsigned int to_nid) @@ -34,65 +38,38 @@ static void change_cpu_under_node(struct cpu *cpu, cpu->node_id = to_nid; } -static ssize_t show_online(struct device *dev, - struct device_attribute *attr, - char *buf) +static int __ref cpu_subsys_online(struct device *dev) { struct cpu *cpu = container_of(dev, struct cpu, dev); + int cpuid = dev->id; + int from_nid, to_nid; + int ret; + + cpu_hotplug_driver_lock(); - return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id)); + from_nid = cpu_to_node(cpuid); + ret = cpu_up(cpuid); + /* + * When hot adding memory to memoryless node and enabling a cpu + * on the node, node number of the cpu may internally change. + */ + to_nid = cpu_to_node(cpuid); + if (from_nid != to_nid) + change_cpu_under_node(cpu, from_nid, to_nid); + + cpu_hotplug_driver_unlock(); + return ret; } -static ssize_t __ref store_online(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int cpu_subsys_offline(struct device *dev) { - struct cpu *cpu = container_of(dev, struct cpu, dev); - int cpuid = cpu->dev.id; - int from_nid, to_nid; - ssize_t ret; + int ret; cpu_hotplug_driver_lock(); - switch (buf[0]) { - case '0': - ret = cpu_down(cpuid); - if (!ret) - kobject_uevent(&dev->kobj, KOBJ_OFFLINE); - break; - case '1': - from_nid = cpu_to_node(cpuid); - ret = cpu_up(cpuid); - - /* - * When hot adding memory to memoryless node and enabling a cpu - * on the node, node number of the cpu may internally change. 
- */ - to_nid = cpu_to_node(cpuid); - if (from_nid != to_nid) - change_cpu_under_node(cpu, from_nid, to_nid); - - if (!ret) - kobject_uevent(&dev->kobj, KOBJ_ONLINE); - break; - default: - ret = -EINVAL; - } + ret = cpu_down(dev->id); cpu_hotplug_driver_unlock(); - - if (ret >= 0) - ret = count; return ret; } -static DEVICE_ATTR(online, 0644, show_online, store_online); - -static struct attribute *hotplug_cpu_attrs[] = { - &dev_attr_online.attr, - NULL -}; - -static struct attribute_group hotplug_cpu_attr_group = { - .attrs = hotplug_cpu_attrs, -}; void unregister_cpu(struct cpu *cpu) { @@ -127,6 +104,17 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ #endif /* CONFIG_HOTPLUG_CPU */ +struct bus_type cpu_subsys = { + .name = "cpu", + .dev_name = "cpu", + .match = cpu_subsys_match, +#ifdef CONFIG_HOTPLUG_CPU + .online = cpu_subsys_online, + .offline = cpu_subsys_offline, +#endif +}; +EXPORT_SYMBOL_GPL(cpu_subsys); + #ifdef CONFIG_KEXEC #include <linux/kexec.h> @@ -185,9 +173,6 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = { #ifdef CONFIG_KEXEC &crash_note_cpu_attr_group, #endif -#ifdef CONFIG_HOTPLUG_CPU - &hotplug_cpu_attr_group, -#endif NULL }; @@ -302,6 +287,8 @@ int __cpuinit register_cpu(struct cpu *cpu, int num) cpu->dev.id = num; cpu->dev.bus = &cpu_subsys; cpu->dev.release = cpu_device_release; + cpu->dev.offline_disabled = !cpu->hotpluggable; + cpu->dev.offline = !cpu_online(num); #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE cpu->dev.bus->uevent = arch_cpu_uevent; #endif diff --git a/drivers/base/memory.c b/drivers/base/memory.c index e315051cfeeb..2b7813ec6d02 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -37,9 +37,14 @@ static inline int base_memory_block_id(int section_nr) return section_nr / sections_per_block; } +static int memory_subsys_online(struct device *dev); +static int memory_subsys_offline(struct device *dev); + static struct bus_type memory_subsys = { .name = MEMORY_CLASS_NAME, .dev_name = MEMORY_CLASS_NAME, + .online = memory_subsys_online, + .offline = memory_subsys_offline, }; static BLOCKING_NOTIFIER_HEAD(memory_chain); @@ -262,33 +267,64 @@ static int __memory_block_change_state(struct memory_block *mem, { int ret = 0; - if (mem->state != from_state_req) { - ret = -EINVAL; - goto out; - } + if (mem->state != from_state_req) + return -EINVAL; if (to_state == MEM_OFFLINE) mem->state = MEM_GOING_OFFLINE; ret = memory_block_action(mem->start_section_nr, to_state, online_type); + mem->state = ret ? from_state_req : to_state; + return ret; +} - if (ret) { - mem->state = from_state_req; - goto out; - } +static int memory_subsys_online(struct device *dev) +{ + struct memory_block *mem = container_of(dev, struct memory_block, dev); + int ret; - mem->state = to_state; - switch (mem->state) { - case MEM_OFFLINE: - kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE); - break; - case MEM_ONLINE: - kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE); - break; - default: - break; + mutex_lock(&mem->state_mutex); + + ret = mem->state == MEM_ONLINE ? 0 : + __memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE, + ONLINE_KEEP); + + mutex_unlock(&mem->state_mutex); + return ret; +} + +static int memory_subsys_offline(struct device *dev) +{ + struct memory_block *mem = container_of(dev, struct memory_block, dev); + int ret; + + mutex_lock(&mem->state_mutex); + + ret = mem->state == MEM_OFFLINE ? 
0 : + __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1); + + mutex_unlock(&mem->state_mutex); + return ret; +} + +static int __memory_block_change_state_uevent(struct memory_block *mem, + unsigned long to_state, unsigned long from_state_req, + int online_type) +{ + int ret = __memory_block_change_state(mem, to_state, from_state_req, + online_type); + if (!ret) { + switch (mem->state) { + case MEM_OFFLINE: + kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE); + break; + case MEM_ONLINE: + kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE); + break; + default: + break; + } } -out: return ret; } @@ -299,8 +335,8 @@ static int memory_block_change_state(struct memory_block *mem, int ret; mutex_lock(&mem->state_mutex); - ret = __memory_block_change_state(mem, to_state, from_state_req, - online_type); + ret = __memory_block_change_state_uevent(mem, to_state, from_state_req, + online_type); mutex_unlock(&mem->state_mutex); return ret; @@ -310,22 +346,34 @@ store_mem_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct memory_block *mem; + bool offline; int ret = -EINVAL; mem = container_of(dev, struct memory_block, dev); - if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) + lock_device_hotplug(); + + if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) { + offline = false; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE, ONLINE_KERNEL); - else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) + } else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) { + offline = false; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE, ONLINE_MOVABLE); - else if (!strncmp(buf, "online", min_t(int, count, 6))) + } else if (!strncmp(buf, "online", min_t(int, count, 6))) { + offline = false; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE, ONLINE_KEEP); - else if(!strncmp(buf, "offline", min_t(int, count, 7))) + } else if(!strncmp(buf, "offline", min_t(int, count, 7))) { + offline = true; ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1); + } + if (!ret) + dev->offline = offline; + + unlock_device_hotplug(); if (ret) return ret; @@ -523,6 +571,7 @@ int register_memory(struct memory_block *memory) memory->dev.id = memory->start_section_nr / sections_per_block; memory->dev.release = memory_block_release; memory->dev.groups = memory_memblk_attr_groups; + memory->dev.offline = memory->state == MEM_OFFLINE; error = device_register(&memory->dev); return error; @@ -646,21 +695,6 @@ int unregister_memory_section(struct mem_section *section) } #endif /* CONFIG_MEMORY_HOTREMOVE */ -/* - * offline one memory block. If the memory block has been offlined, do nothing. 
- */ -int offline_memory_block(struct memory_block *mem) -{ - int ret = 0; - - mutex_lock(&mem->state_mutex); - if (mem->state != MEM_OFFLINE) - ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1); - mutex_unlock(&mem->state_mutex); - - return ret; -} - /* return true if the memory block is offlined, otherwise, return false */ bool is_memblock_offlined(struct memory_block *mem) { diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c index 67a274e86727..5fb74b43848e 100644 --- a/drivers/base/pinctrl.c +++ b/drivers/base/pinctrl.c @@ -48,6 +48,25 @@ int pinctrl_bind_pins(struct device *dev) goto cleanup_get; } +#ifdef CONFIG_PM + /* + * If power management is enabled, we also look for the optional + * sleep and idle pin states, with semantics as defined in + * <linux/pinctrl/pinctrl-state.h> + */ + dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_SLEEP); + if (IS_ERR(dev->pins->sleep_state)) + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no sleep pinctrl state\n"); + + dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_IDLE); + if (IS_ERR(dev->pins->idle_state)) + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no idle pinctrl state\n"); +#endif + return 0; /* diff --git a/drivers/base/platform.c b/drivers/base/platform.c index ed75cf6ef9c9..6eaa7ab9e4bc 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -890,7 +890,6 @@ int platform_pm_restore(struct device *dev) static const struct dev_pm_ops platform_dev_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, - .runtime_idle = pm_generic_runtime_idle, USE_PLATFORM_PM_SLEEP_OPS }; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7072404c8b6d..bfb8955c406c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2143,7 +2143,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; - genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; genpd->domain.ops.prepare = pm_genpd_prepare; genpd->domain.ops.suspend = pm_genpd_suspend; genpd->domain.ops.suspend_late = pm_genpd_suspend_late; diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index bfd898b8988e..5ee030a864f9 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -12,29 +12,6 @@ #ifdef CONFIG_PM_RUNTIME /** - * pm_generic_runtime_idle - Generic runtime idle callback for subsystems. - * @dev: Device to handle. - * - * If PM operations are defined for the @dev's driver and they include - * ->runtime_idle(), execute it and return its error code, if nonzero. - * Otherwise, execute pm_runtime_suspend() for the device and return 0. - */ -int pm_generic_runtime_idle(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm && pm->runtime_idle) { - int ret = pm->runtime_idle(dev); - if (ret) - return ret; - } - - pm_runtime_suspend(dev); - return 0; -} -EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); - -/** * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. * @dev: Device to suspend. 
* diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index f0077cb8e249..c8ec186303db 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -648,14 +648,14 @@ int opp_init_cpufreq_table(struct device *dev, list_for_each_entry(opp, &dev_opp->opp_list, node) { if (opp->available) { - freq_table[i].index = i; + freq_table[i].driver_data = i; freq_table[i].frequency = opp->rate / 1000; i++; } } mutex_unlock(&dev_opp_list_lock); - freq_table[i].index = i; + freq_table[i].driver_data = i; freq_table[i].frequency = CPUFREQ_TABLE_END; *table = &freq_table[0]; diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 71671c42ef45..5c1361a9e5dd 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -42,6 +42,7 @@ #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/err.h> +#include <trace/events/power.h> #include "power.h" @@ -305,6 +306,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, else if (!dev->power.qos) ret = dev_pm_qos_constraints_allocate(dev); + trace_dev_pm_qos_add_request(dev_name(dev), type, value); if (!ret) { req->dev = dev; req->type = type; @@ -349,6 +351,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, return -EINVAL; } + trace_dev_pm_qos_update_request(dev_name(req->dev), req->type, + new_value); if (curr_value != new_value) ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); @@ -398,6 +402,8 @@ static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) if (IS_ERR_OR_NULL(req->dev->power.qos)) return -ENODEV; + trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type, + PM_QOS_DEFAULT_VALUE); ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); return ret; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index ef13ad08afb2..268a35097578 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -293,11 +293,8 @@ static int rpm_idle(struct device *dev, int rpmflags) /* Pending requests need to be canceled. */ dev->power.request = RPM_REQ_NONE; - if (dev->power.no_callbacks) { - /* Assume ->runtime_idle() callback would have suspended. */ - retval = rpm_suspend(dev, rpmflags); + if (dev->power.no_callbacks) goto out; - } /* Carry out an asynchronous or a synchronous idle notification. */ if (rpmflags & RPM_ASYNC) { @@ -306,7 +303,8 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.request_pending = true; queue_work(pm_wq, &dev->power.work); } - goto out; + trace_rpm_return_int(dev, _THIS_IP_, 0); + return 0; } dev->power.idle_notification = true; @@ -326,14 +324,14 @@ static int rpm_idle(struct device *dev, int rpmflags) callback = dev->driver->pm->runtime_idle; if (callback) - __rpm_callback(callback, dev); + retval = __rpm_callback(callback, dev); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); out: trace_rpm_return_int(dev, _THIS_IP_, retval); - return retval; + return retval ? 
retval : rpm_suspend(dev, rpmflags); } /** diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 79715e7fa43e..2d56f4113ae7 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -659,7 +659,7 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) } EXPORT_SYMBOL_GPL(pm_wakeup_event); -static void print_active_wakeup_sources(void) +void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; int active = 0; @@ -683,6 +683,7 @@ static void print_active_wakeup_sources(void) last_activity_ws->name); rcu_read_unlock(); } +EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); /** * pm_wakeup_pending - Check if power transition in progress should be aborted. @@ -707,8 +708,10 @@ bool pm_wakeup_pending(void) } spin_unlock_irqrestore(&events_lock, flags); - if (ret) - print_active_wakeup_sources(); + if (ret) { + pr_info("PM: Wakeup pending, aborting suspend\n"); + pm_print_active_wakeup_sources(); + } return ret; } diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index c130536e0ab0..29c83160ca29 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -52,6 +52,7 @@ struct regmap_async { struct regmap { struct mutex mutex; spinlock_t spinlock; + unsigned long spinlock_flags; regmap_lock lock; regmap_unlock unlock; void *lock_arg; /* This is passed to lock/unlock functions */ @@ -148,6 +149,7 @@ struct regcache_ops { int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); int (*write)(struct regmap *map, unsigned int reg, unsigned int value); int (*sync)(struct regmap *map, unsigned int min, unsigned int max); + int (*drop)(struct regmap *map, unsigned int min, unsigned int max); }; bool regmap_writeable(struct regmap *map, unsigned int reg); @@ -174,6 +176,14 @@ struct regmap_range_node { unsigned int window_len; }; +struct regmap_field { + struct regmap *regmap; + unsigned int mask; + /* lsb */ + unsigned int shift; + unsigned int reg; +}; + #ifdef CONFIG_DEBUG_FS extern void regmap_debugfs_initcall(void); extern void regmap_debugfs_init(struct regmap *map, const char *name); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 02f490bad30f..5c1435c4e210 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -304,6 +304,48 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, return 0; } +static struct regcache_rbtree_node * +regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) +{ + struct regcache_rbtree_node *rbnode; + const struct regmap_range *range; + int i; + + rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL); + if (!rbnode) + return NULL; + + /* If there is a read table then use it to guess at an allocation */ + if (map->rd_table) { + for (i = 0; i < map->rd_table->n_yes_ranges; i++) { + if (regmap_reg_in_range(reg, + &map->rd_table->yes_ranges[i])) + break; + } + + if (i != map->rd_table->n_yes_ranges) { + range = &map->rd_table->yes_ranges[i]; + rbnode->blklen = range->range_max - range->range_min + + 1; + rbnode->base_reg = range->range_min; + } + } + + if (!rbnode->blklen) { + rbnode->blklen = sizeof(*rbnode); + rbnode->base_reg = reg; + } + + rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, + GFP_KERNEL); + if (!rbnode->block) { + kfree(rbnode); + return NULL; + } + + return rbnode; +} + static int regcache_rbtree_write(struct regmap *map, unsigned int reg, unsigned int value) { @@ -354,23 +396,15 @@ static int 
regcache_rbtree_write(struct regmap *map, unsigned int reg, return 0; } } - /* we did not manage to find a place to insert it in an existing - * block so create a new rbnode with a single register in its block. - * This block will get populated further if any other adjacent - * registers get modified in the future. + + /* We did not manage to find a place to insert it in + * an existing block so create a new rbnode. */ - rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); + rbnode = regcache_rbtree_node_alloc(map, reg); if (!rbnode) return -ENOMEM; - rbnode->blklen = sizeof(*rbnode); - rbnode->base_reg = reg; - rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, - GFP_KERNEL); - if (!rbnode->block) { - kfree(rbnode); - return -ENOMEM; - } - regcache_rbtree_set_register(map, rbnode, 0, value); + regcache_rbtree_set_register(map, rbnode, + reg - rbnode->base_reg, value); regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 507ee2da0f6e..e69102696533 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -250,6 +250,38 @@ int regcache_write(struct regmap *map, return 0; } +static int regcache_default_sync(struct regmap *map, unsigned int min, + unsigned int max) +{ + unsigned int reg; + + for (reg = min; reg <= max; reg++) { + unsigned int val; + int ret; + + if (regmap_volatile(map, reg)) + continue; + + ret = regcache_read(map, reg, &val); + if (ret) + return ret; + + /* Is this the hardware default? If so skip. */ + ret = regcache_lookup_reg(map, reg); + if (ret >= 0 && val == map->reg_defaults[ret].def) + continue; + + map->cache_bypass = 1; + ret = _regmap_write(map, reg, val); + map->cache_bypass = 0; + if (ret) + return ret; + dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); + } + + return 0; +} + /** * regcache_sync: Sync the register cache with the hardware. * @@ -268,7 +300,7 @@ int regcache_sync(struct regmap *map) const char *name; unsigned int bypass; - BUG_ON(!map->cache_ops || !map->cache_ops->sync); + BUG_ON(!map->cache_ops); map->lock(map->lock_arg); /* Remember the initial bypass state */ @@ -297,7 +329,10 @@ int regcache_sync(struct regmap *map) } map->cache_bypass = 0; - ret = map->cache_ops->sync(map, 0, map->max_register); + if (map->cache_ops->sync) + ret = map->cache_ops->sync(map, 0, map->max_register); + else + ret = regcache_default_sync(map, 0, map->max_register); if (ret == 0) map->cache_dirty = false; @@ -331,7 +366,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, const char *name; unsigned int bypass; - BUG_ON(!map->cache_ops || !map->cache_ops->sync); + BUG_ON(!map->cache_ops); map->lock(map->lock_arg); @@ -346,7 +381,10 @@ int regcache_sync_region(struct regmap *map, unsigned int min, if (!map->cache_dirty) goto out; - ret = map->cache_ops->sync(map, min, max); + if (map->cache_ops->sync) + ret = map->cache_ops->sync(map, min, max); + else + ret = regcache_default_sync(map, min, max); out: trace_regcache_sync(map->dev, name, "stop region"); @@ -359,6 +397,43 @@ out: EXPORT_SYMBOL_GPL(regcache_sync_region); /** + * regcache_drop_region: Discard part of the register cache + * + * @map: map to operate on + * @min: first register to discard + * @max: last register to discard + * + * Discard part of the register cache. + * + * Return a negative value on failure, 0 on success. 
+ */ +int regcache_drop_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + unsigned int reg; + int ret = 0; + + if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop)) + return -EINVAL; + + map->lock(map->lock_arg); + + trace_regcache_drop_region(map->dev, min, max); + + if (map->cache_present) + for (reg = min; reg < max + 1; reg++) + clear_bit(reg, map->cache_present); + + if (map->cache_ops && map->cache_ops->drop) + ret = map->cache_ops->drop(map, min, max); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regcache_drop_region); + +/** * regcache_cache_only: Put a register map into cache only mode * * @map: map to configure diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 975719bc3450..53495753fbdb 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -84,6 +84,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, unsigned int fpos_offset; unsigned int reg_offset; + /* Suppress the cache if we're using a subrange */ + if (from) + return from; + /* * If we don't have a cache build one so we don't have to do a * linear scan each time. @@ -145,7 +149,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, reg_offset = fpos_offset / map->debugfs_tot_len; *pos = c->min + (reg_offset * map->debugfs_tot_len); mutex_unlock(&map->cache_lock); - return c->base_reg + reg_offset; + return c->base_reg + (reg_offset * map->reg_stride); } *pos = c->max; @@ -281,7 +285,7 @@ static ssize_t regmap_map_write_file(struct file *file, return -EINVAL; /* Userspace has been fiddling around behind the kernel's back */ - add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); ret = regmap_write(map, reg, value); if (ret < 0) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index a941dcfe7590..95920583e31e 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -65,9 +65,8 @@ bool regmap_reg_in_ranges(unsigned int reg, } EXPORT_SYMBOL_GPL(regmap_reg_in_ranges); -static bool _regmap_check_range_table(struct regmap *map, - unsigned int reg, - const struct regmap_access_table *table) +bool regmap_check_range_table(struct regmap *map, unsigned int reg, + const struct regmap_access_table *table) { /* Check "no ranges" first */ if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) @@ -80,6 +79,7 @@ static bool _regmap_check_range_table(struct regmap *map, return regmap_reg_in_ranges(reg, table->yes_ranges, table->n_yes_ranges); } +EXPORT_SYMBOL_GPL(regmap_check_range_table); bool regmap_writeable(struct regmap *map, unsigned int reg) { @@ -90,7 +90,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg) return map->writeable_reg(map->dev, reg); if (map->wr_table) - return _regmap_check_range_table(map, reg, map->wr_table); + return regmap_check_range_table(map, reg, map->wr_table); return true; } @@ -107,7 +107,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg) return map->readable_reg(map->dev, reg); if (map->rd_table) - return _regmap_check_range_table(map, reg, map->rd_table); + return regmap_check_range_table(map, reg, map->rd_table); return true; } @@ -121,9 +121,12 @@ bool regmap_volatile(struct regmap *map, unsigned int reg) return map->volatile_reg(map->dev, reg); if (map->volatile_table) - return _regmap_check_range_table(map, reg, map->volatile_table); + return regmap_check_range_table(map, reg, 
map->volatile_table); - return true; + if (map->cache_ops) + return false; + else + return true; } bool regmap_precious(struct regmap *map, unsigned int reg) @@ -135,7 +138,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg) return map->precious_reg(map->dev, reg); if (map->precious_table) - return _regmap_check_range_table(map, reg, map->precious_table); + return regmap_check_range_table(map, reg, map->precious_table); return false; } @@ -302,13 +305,16 @@ static void regmap_unlock_mutex(void *__map) static void regmap_lock_spinlock(void *__map) { struct regmap *map = __map; - spin_lock(&map->spinlock); + unsigned long flags; + + spin_lock_irqsave(&map->spinlock, flags); + map->spinlock_flags = flags; } static void regmap_unlock_spinlock(void *__map) { struct regmap *map = __map; - spin_unlock(&map->spinlock); + spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); } static void dev_get_regmap_release(struct device *dev, void *res) @@ -801,6 +807,95 @@ struct regmap *devm_regmap_init(struct device *dev, } EXPORT_SYMBOL_GPL(devm_regmap_init); +static void regmap_field_init(struct regmap_field *rm_field, + struct regmap *regmap, struct reg_field reg_field) +{ + int field_bits = reg_field.msb - reg_field.lsb + 1; + rm_field->regmap = regmap; + rm_field->reg = reg_field.reg; + rm_field->shift = reg_field.lsb; + rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb); +} + +/** + * devm_regmap_field_alloc(): Allocate and initialise a register field + * in a register map. + * + * @dev: Device that will be interacted with + * @regmap: regmap bank in which this register field is located. + * @reg_field: Register field with in the bank. + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap_field. The regmap_field will be automatically freed + * by the device management code. + */ +struct regmap_field *devm_regmap_field_alloc(struct device *dev, + struct regmap *regmap, struct reg_field reg_field) +{ + struct regmap_field *rm_field = devm_kzalloc(dev, + sizeof(*rm_field), GFP_KERNEL); + if (!rm_field) + return ERR_PTR(-ENOMEM); + + regmap_field_init(rm_field, regmap, reg_field); + + return rm_field; + +} +EXPORT_SYMBOL_GPL(devm_regmap_field_alloc); + +/** + * devm_regmap_field_free(): Free register field allocated using + * devm_regmap_field_alloc. Usally drivers need not call this function, + * as the memory allocated via devm will be freed as per device-driver + * life-cyle. + * + * @dev: Device that will be interacted with + * @field: regmap field which should be freed. + */ +void devm_regmap_field_free(struct device *dev, + struct regmap_field *field) +{ + devm_kfree(dev, field); +} +EXPORT_SYMBOL_GPL(devm_regmap_field_free); + +/** + * regmap_field_alloc(): Allocate and initialise a register field + * in a register map. + * + * @regmap: regmap bank in which this register field is located. + * @reg_field: Register field with in the bank. + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap_field. The regmap_field should be freed by the + * user once its finished working with it using regmap_field_free(). 
+ */ +struct regmap_field *regmap_field_alloc(struct regmap *regmap, + struct reg_field reg_field) +{ + struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL); + + if (!rm_field) + return ERR_PTR(-ENOMEM); + + regmap_field_init(rm_field, regmap, reg_field); + + return rm_field; +} +EXPORT_SYMBOL_GPL(regmap_field_alloc); + +/** + * regmap_field_free(): Free register field allocated using regmap_field_alloc + * + * @field: regmap field which should be freed. + */ +void regmap_field_free(struct regmap_field *field) +{ + kfree(field); +} +EXPORT_SYMBOL_GPL(regmap_field_free); + /** * regmap_reinit_cache(): Reinitialise the current register cache * @@ -1249,6 +1344,22 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, } EXPORT_SYMBOL_GPL(regmap_raw_write); +/** + * regmap_field_write(): Write a value to a single register field + * + * @field: Register field to write to + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_field_write(struct regmap_field *field, unsigned int val) +{ + return regmap_update_bits(field->regmap, field->reg, + field->mask, val << field->shift); +} +EXPORT_SYMBOL_GPL(regmap_field_write); + /* * regmap_bulk_write(): Write multiple registers to the device * @@ -1532,6 +1643,31 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, EXPORT_SYMBOL_GPL(regmap_raw_read); /** + * regmap_field_read(): Read a value to a single register field + * + * @field: Register field to read from + * @val: Pointer to store read value + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_field_read(struct regmap_field *field, unsigned int *val) +{ + int ret; + unsigned int reg_val; + ret = regmap_read(field->regmap, field->reg, ®_val); + if (ret != 0) + return ret; + + reg_val &= field->mask; + reg_val >>= field->shift; + *val = reg_val; + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_field_read); + +/** * regmap_bulk_read(): Read multiple registers from the device * * @map: Register map to write to |
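
The core.c hunks above add lock_device_hotplug(), device_offline() and device_online(), plus a generic "online" sysfs attribute that drives them. A minimal caller-side sketch follows, assuming only the semantics documented in the kernel-doc above (0 or 1 on success, negative errno on failure); the foo_try_eject() wrapper itself is hypothetical and not part of this series.

#include <linux/device.h>

/*
 * Hypothetical hot-removal path: offline the device before unregistering
 * it. device_offline() returns -EPERM if offlining is disabled, -EBUSY if
 * a descendant that supports offlining is still online, 0 on success and
 * 1 if the device was already offline.
 */
static int foo_try_eject(struct device *dev)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(dev);
	unlock_device_hotplug();
	if (ret < 0)
		return ret;		/* still in use, removal refused */

	device_unregister(dev);		/* proceed with the actual removal */
	return 0;
}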
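
On the provider side, cpu.c and memory.c are converted to the new mechanism by filling in the bus_type .online/.offline callbacks instead of registering their own "online" attributes. The same pattern for a made-up bus is sketched below; the callback bodies are placeholders for real power-up and quiesce code.

#include <linux/device.h>

static int foo_bus_online(struct device *dev)
{
	/* Bring the device back into service after an earlier offline;
	 * called by device_online() with the device lock held. */
	dev_info(dev, "online\n");	/* placeholder for real power-up */
	return 0;
}

static int foo_bus_offline(struct device *dev)
{
	/* Quiesce the device ahead of hot removal; a nonzero return makes
	 * device_offline() fail and leaves dev->offline false. */
	dev_info(dev, "offline\n");	/* placeholder for real quiesce */
	return 0;
}

static struct bus_type foo_bus_type = {
	.name	 = "foo",
	.online	 = foo_bus_online,
	.offline = foo_bus_offline,
};

With such a bus registered, the driver core creates the "online" attribute automatically for each device (unless dev->offline_disabled is set, as register_cpu() now does for non-hotpluggable CPUs), parses writes with strtobool() and routes them to these callbacks, emitting KOBJ_ONLINE/KOBJ_OFFLINE uevents on success.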
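
The pinctrl.c change only looks up the optional "sleep" and "idle" pin states and stores them in dev->pins; nothing in this diff acts on them. Below is a sketch of how a driver's system PM callbacks could use the stored states, assuming CONFIG_PINCTRL and CONFIG_PM are enabled; the foo_* callbacks are illustrative and not part of this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>

static int foo_suspend(struct device *dev)
{
	/* The state is optional, so dev->pins->sleep_state may hold an
	 * error pointer; only switch if the board actually provides it. */
	if (dev->pins && !IS_ERR(dev->pins->sleep_state))
		return pinctrl_select_state(dev->pins->p,
					    dev->pins->sleep_state);
	return 0;
}

static int foo_resume(struct device *dev)
{
	if (dev->pins && !IS_ERR(dev->pins->default_state))
		return pinctrl_select_state(dev->pins->p,
					    dev->pins->default_state);
	return 0;
}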
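
generic_ops.c removes pm_generic_runtime_idle() because rpm_idle() in runtime.c now calls rpm_suspend() itself whenever the ->runtime_idle() callback returns 0 or is absent (platform.c and domain.c drop their .runtime_idle assignments for the same reason). A driver callback under the new convention is sketched with a hypothetical foo driver:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Put the (hypothetical) hardware into its low-power state. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the (hypothetical) hardware back to full power. */
	return 0;
}

static int foo_runtime_idle(struct device *dev)
{
	/*
	 * Returning 0 lets the PM core go straight on to rpm_suspend();
	 * returning an error (e.g. -EBUSY) vetoes the suspend. There is
	 * no need to call pm_runtime_suspend() here any more, which is
	 * what pm_generic_runtime_idle() used to do on the driver's behalf.
	 */
	return 0;
}

/* Would be plugged into the driver's struct device_driver .pm field. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};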
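
regcache.c adds regcache_drop_region() for discarding part of the cache, plus a regcache_default_sync() fallback used by regcache_sync()/regcache_sync_region() when the cache type provides no ->sync() operation (it reads each non-volatile register back from the cache and writes out anything that differs from the hardware default). A driver-side sketch of the drop call follows; the register range and scenario are invented, and the call returns -EINVAL if the cache in use cannot track dropped registers.

#include <linux/regmap.h>

/*
 * Hypothetical scenario: a sub-block of the chip covering registers
 * 0x100..0x1ff is powered off and will come back with contents that
 * must not be restored from the cache.
 */
static int foo_block_power_off(struct regmap *map)
{
	/* Forget the cached values so a later regcache_sync() after a
	 * full-chip resume does not write stale data into the block. */
	return regcache_drop_region(map, 0x100, 0x1ff);
}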
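
Finally, regmap.c introduces regmap_field objects that wrap a (register, lsb, msb) description so drivers can access sub-register bitfields without open-coding masks and shifts. A usage sketch relying only on the struct reg_field members visible in this diff (.reg, .lsb, .msb); the register layout (bits 3:1 of a control register at 0x04) and the foo_* names are invented.

#include <linux/err.h>
#include <linux/regmap.h>

static const struct reg_field foo_clk_div_field = {
	.reg = 0x04,	/* hypothetical control register */
	.lsb = 1,	/* the field occupies bits 3:1 */
	.msb = 3,
};

static int foo_set_clk_div(struct device *dev, struct regmap *map,
			   unsigned int div)
{
	struct regmap_field *field;
	unsigned int readback;
	int ret;

	field = devm_regmap_field_alloc(dev, map, foo_clk_div_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* regmap_field_write() is a regmap_update_bits() under the hood:
	 * 'div' is shifted into bits 3:1 and only those bits are changed. */
	ret = regmap_field_write(field, div);
	if (ret)
		return ret;

	/* Reads come back already masked and shifted down to bit 0. */
	return regmap_field_read(field, &readback);
}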