Diffstat (limited to 'drivers/base')
28 files changed, 1063 insertions, 500 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index f046d21de57d..1a5f6a157a57 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -140,13 +140,10 @@ config EXTRA_FIRMWARE config EXTRA_FIRMWARE_DIR string "Firmware blobs root directory" depends on EXTRA_FIRMWARE != "" - default "firmware" + default "/lib/firmware" help This option controls the directory in which the kernel build system looks for the firmware files listed in the EXTRA_FIRMWARE option. - The default is firmware/ in the kernel source tree, but by changing - this option you can point it elsewhere, such as /lib/firmware/ or - some other directory containing the firmware files. config FW_LOADER_USER_HELPER bool diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index d1c33a85059e..6df7d6676a48 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -41,8 +41,7 @@ static ssize_t cpu_capacity_show(struct device *dev, { struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%lu\n", - topology_get_cpu_scale(NULL, cpu->dev.id)); + return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id)); } static ssize_t cpu_capacity_store(struct device *dev, @@ -96,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl); static u32 capacity_scale; static u32 *raw_capacity; -static bool cap_parsing_failed; + +static int __init free_raw_capacity(void) +{ + kfree(raw_capacity); + raw_capacity = NULL; + + return 0; +} void topology_normalize_cpu_scale(void) { u64 capacity; int cpu; - if (!raw_capacity || cap_parsing_failed) + if (!raw_capacity) return; pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); @@ -120,16 +126,16 @@ void topology_normalize_cpu_scale(void) mutex_unlock(&cpu_scale_mutex); } -int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) +bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) { - int ret = 1; + static bool cap_parsing_failed; + int ret; u32 cpu_capacity; if (cap_parsing_failed) - return !ret; + return false; - ret = of_property_read_u32(cpu_node, - "capacity-dmips-mhz", + ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", &cpu_capacity); if (!ret) { if (!raw_capacity) { @@ -139,33 +145,32 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) if (!raw_capacity) { pr_err("cpu_capacity: failed to allocate memory for raw capacities\n"); cap_parsing_failed = true; - return 0; + return false; } } capacity_scale = max(cpu_capacity, capacity_scale); raw_capacity[cpu] = cpu_capacity; - pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n", - cpu_node->full_name, raw_capacity[cpu]); + pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n", + cpu_node, raw_capacity[cpu]); } else { if (raw_capacity) { - pr_err("cpu_capacity: missing %s raw capacity\n", - cpu_node->full_name); + pr_err("cpu_capacity: missing %pOF raw capacity\n", + cpu_node); pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); } cap_parsing_failed = true; - kfree(raw_capacity); + free_raw_capacity(); } return !ret; } #ifdef CONFIG_CPU_FREQ -static cpumask_var_t cpus_to_visit; -static bool cap_parsing_done; -static void parsing_done_workfn(struct work_struct *work); -static DECLARE_WORK(parsing_done_work, parsing_done_workfn); +static cpumask_var_t cpus_to_visit __initdata; +static void __init parsing_done_workfn(struct work_struct *work); +static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn); -static int +static int __init 
init_cpu_capacity_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -173,34 +178,35 @@ init_cpu_capacity_callback(struct notifier_block *nb, struct cpufreq_policy *policy = data; int cpu; - if (cap_parsing_failed || cap_parsing_done) + if (!raw_capacity) return 0; - switch (val) { - case CPUFREQ_NOTIFY: - pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", - cpumask_pr_args(policy->related_cpus), - cpumask_pr_args(cpus_to_visit)); - cpumask_andnot(cpus_to_visit, - cpus_to_visit, - policy->related_cpus); - for_each_cpu(cpu, policy->related_cpus) { - raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) * - policy->cpuinfo.max_freq / 1000UL; - capacity_scale = max(raw_capacity[cpu], capacity_scale); - } - if (cpumask_empty(cpus_to_visit)) { - topology_normalize_cpu_scale(); - kfree(raw_capacity); - pr_debug("cpu_capacity: parsing done\n"); - cap_parsing_done = true; - schedule_work(&parsing_done_work); - } + if (val != CPUFREQ_NOTIFY) + return 0; + + pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", + cpumask_pr_args(policy->related_cpus), + cpumask_pr_args(cpus_to_visit)); + + cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); + + for_each_cpu(cpu, policy->related_cpus) { + raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) * + policy->cpuinfo.max_freq / 1000UL; + capacity_scale = max(raw_capacity[cpu], capacity_scale); + } + + if (cpumask_empty(cpus_to_visit)) { + topology_normalize_cpu_scale(); + free_raw_capacity(); + pr_debug("cpu_capacity: parsing done\n"); + schedule_work(&parsing_done_work); } + return 0; } -static struct notifier_block init_cpu_capacity_notifier = { +static struct notifier_block init_cpu_capacity_notifier __initdata = { .notifier_call = init_cpu_capacity_callback, }; @@ -226,18 +232,12 @@ static int __init register_cpufreq_notifier(void) } core_initcall(register_cpufreq_notifier); -static void parsing_done_workfn(struct work_struct *work) +static void __init parsing_done_workfn(struct work_struct *work) { cpufreq_unregister_notifier(&init_cpu_capacity_notifier, CPUFREQ_POLICY_NOTIFIER); } #else -static int __init free_raw_capacity(void) -{ - kfree(raw_capacity); - - return 0; -} core_initcall(free_raw_capacity); #endif diff --git a/drivers/base/base.h b/drivers/base/base.h index e19b1008e5fb..539432a14b5c 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv, extern void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); -extern int device_add_groups(struct device *dev, - const struct attribute_group **groups); -extern void device_remove_groups(struct device *dev, - const struct attribute_group **groups); - extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index e162c9a789ba..22a64fd3309b 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv) out_unregister: kobject_put(&priv->kobj); - kfree(drv->p); + /* drv->p is freed in driver_release() */ drv->p = NULL; out_put_bus: bus_put(bus); diff --git a/drivers/base/core.c b/drivers/base/core.c index 755451f684bc..12ebd055724c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1023,12 +1023,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups) { return 
sysfs_create_groups(&dev->kobj, groups); } +EXPORT_SYMBOL_GPL(device_add_groups); void device_remove_groups(struct device *dev, const struct attribute_group **groups) { sysfs_remove_groups(&dev->kobj, groups); } +EXPORT_SYMBOL_GPL(device_remove_groups); + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +static int devm_attr_group_match(struct device *dev, void *res, void *data) +{ + return ((union device_attr_group_devres *)res)->group == data; +} + +static void devm_attr_group_remove(struct device *dev, void *res) +{ + union device_attr_group_devres *devres = res; + const struct attribute_group *group = devres->group; + + dev_dbg(dev, "%s: removing group %p\n", __func__, group); + sysfs_remove_group(&dev->kobj, group); +} + +static void devm_attr_groups_remove(struct device *dev, void *res) +{ + union device_attr_group_devres *devres = res; + const struct attribute_group **groups = devres->groups; + + dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); + sysfs_remove_groups(&dev->kobj, groups); +} + +/** + * devm_device_add_group - given a device, create a managed attribute group + * @dev: The device to create the group for + * @grp: The attribute group to create + * + * This function creates a group for the first time. It will explicitly + * warn and error if any of the attribute files being created already exist. + * + * Returns 0 on success or error code on failure. + */ +int devm_device_add_group(struct device *dev, const struct attribute_group *grp) +{ + union device_attr_group_devres *devres; + int error; + + devres = devres_alloc(devm_attr_group_remove, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + error = sysfs_create_group(&dev->kobj, grp); + if (error) { + devres_free(devres); + return error; + } + + devres->group = grp; + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_device_add_group); + +/** + * devm_device_remove_group: remove a managed group from a device + * @dev: device to remove the group from + * @grp: group to remove + * + * This function removes a group of attributes from a device. The attributes + * previously have to have been created for this group, otherwise it will fail. + */ +void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp) +{ + WARN_ON(devres_release(dev, devm_attr_group_remove, + devm_attr_group_match, + /* cast away const */ (void *)grp)); +} +EXPORT_SYMBOL_GPL(devm_device_remove_group); + +/** + * devm_device_add_groups - create a bunch of managed attribute groups + * @dev: The device to create the group for + * @groups: The attribute groups to create, NULL terminated + * + * This function creates a bunch of managed attribute groups. If an error + * occurs when creating a group, all previously created groups will be + * removed, unwinding everything back to the original state when this + * function was called. It will explicitly warn and error if any of the + * attribute files being created already exist. + * + * Returns 0 on success or error code from sysfs_create_group on failure. 
+ */ +int devm_device_add_groups(struct device *dev, + const struct attribute_group **groups) +{ + union device_attr_group_devres *devres; + int error; + + devres = devres_alloc(devm_attr_groups_remove, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + error = sysfs_create_groups(&dev->kobj, groups); + if (error) { + devres_free(devres); + return error; + } + + devres->groups = groups; + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_device_add_groups); + +/** + * devm_device_remove_groups - remove a list of managed groups + * + * @dev: The device for the groups to be removed from + * @groups: NULL terminated list of groups to be removed + * + * If groups is not NULL, remove the specified groups from the device. + */ +void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups) +{ + WARN_ON(devres_release(dev, devm_attr_groups_remove, + devm_attr_group_match, + /* cast away const */ (void *)groups)); +} +EXPORT_SYMBOL_GPL(devm_device_remove_groups); static int device_add_attrs(struct device *dev) { @@ -2664,11 +2796,12 @@ void device_shutdown(void) pm_runtime_get_noresume(dev); pm_runtime_barrier(dev); - if (dev->class && dev->class->shutdown) { + if (dev->class && dev->class->shutdown_pre) { if (initcall_debug) - dev_info(dev, "shutdown\n"); - dev->class->shutdown(dev); - } else if (dev->bus && dev->bus->shutdown) { + dev_info(dev, "shutdown_pre\n"); + dev->class->shutdown_pre(dev); + } + if (dev->bus && dev->bus->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 2c3b359b3536..227bac5f1191 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -256,9 +256,9 @@ static ssize_t print_cpus_offline(struct device *dev, buf[n++] = ','; if (nr_cpu_ids == total_cpus-1) - n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids); + n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids); else - n += snprintf(&buf[n], len - n, "%d-%d", + n += snprintf(&buf[n], len - n, "%u-%d", nr_cpu_ids, total_cpus-1); } @@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num) per_cpu(cpu_sys_devices, num) = &cpu->dev; register_cpu_under_node(num, cpu_to_node(num)); - dev_pm_qos_expose_latency_limit(&cpu->dev, 0); + dev_pm_qos_expose_latency_limit(&cpu->dev, + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT); return 0; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 4882f06d12df..ad44b40fe284 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -20,6 +20,7 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/init.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/wait.h> @@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex); static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static atomic_t deferred_trigger_count = ATOMIC_INIT(0); +static bool initcalls_done; /* * In some cases, like suspend to RAM or hibernation, It might be reasonable @@ -62,6 +64,26 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0); static bool defer_all_probes; /* + * For initcall_debug, show the deferred probes executed in late_initcall + * processing. 
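To illustrate the devres-managed attribute-group helpers added to drivers/base/core.c above: a hypothetical driver (the my_* names are illustrative, not part of this commit) can now create its sysfs group in probe() and rely on devres to tear it down on unbind, with no sysfs_remove_group() call in the remove path.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *my_attrs[] = {
	&dev_attr_foo.attr,
	NULL
};

static const struct attribute_group my_group = {
	.attrs = my_attrs,
};

static int my_probe(struct platform_device *pdev)
{
	/* The group is removed automatically by devres on unbind. */
	return devm_device_add_group(&pdev->dev, &my_group);
}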
+ */ +static void deferred_probe_debug(struct device *dev) +{ + ktime_t calltime, delta, rettime; + unsigned long long duration; + + printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev), + task_pid_nr(current)); + calltime = ktime_get(); + bus_probe_device(dev); + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n", + dev_name(dev), duration); +} + +/* * deferred_probe_work_func() - Retry probing devices in the active list. */ static void deferred_probe_work_func(struct work_struct *work) @@ -106,7 +128,10 @@ static void deferred_probe_work_func(struct work_struct *work) device_pm_unlock(); dev_dbg(dev, "Retrying from deferred list\n"); - bus_probe_device(dev); + if (initcall_debug && !initcalls_done) + deferred_probe_debug(dev); + else + bus_probe_device(dev); mutex_lock(&deferred_probe_mutex); @@ -215,6 +240,7 @@ static int deferred_probe_initcall(void) driver_deferred_probe_trigger(); /* Sort as many dependencies as possible before exiting initcalls */ flush_work(&deferred_probe_work); + initcalls_done = true; return 0; } late_initcall(deferred_probe_initcall); @@ -259,6 +285,8 @@ static void driver_bound(struct device *dev) if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_BOUND_DRIVER, dev); + + kobject_uevent(&dev->kobj, KOBJ_BIND); } static int driver_sysfs_add(struct device *dev) @@ -848,6 +876,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBOUND_DRIVER, dev); + + kobject_uevent(&dev->kobj, KOBJ_UNBIND); } } diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 2ae24c28e70c..744f64f43454 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de { if (dev && dev->dma_mem) return dev->dma_mem; - return dma_coherent_default_memory; + return NULL; } static inline dma_addr_t dma_get_device_base(struct device *dev, @@ -37,7 +37,7 @@ static inline dma_addr_t dma_get_device_base(struct device *dev, return mem->device_base; } -static bool dma_init_coherent_memory( +static int dma_init_coherent_memory( phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, struct dma_coherent_mem **mem) { @@ -45,25 +45,28 @@ static bool dma_init_coherent_memory( void __iomem *mem_base = NULL; int pages = size >> PAGE_SHIFT; int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); + int ret; - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) - goto out; - if (!size) + if (!size) { + ret = -EINVAL; goto out; + } - if (flags & DMA_MEMORY_MAP) - mem_base = memremap(phys_addr, size, MEMREMAP_WC); - else - mem_base = ioremap(phys_addr, size); - if (!mem_base) + mem_base = memremap(phys_addr, size, MEMREMAP_WC); + if (!mem_base) { + ret = -EINVAL; goto out; - + } dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); - if (!dma_mem) + if (!dma_mem) { + ret = -ENOMEM; goto out; + } dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!dma_mem->bitmap) + if (!dma_mem->bitmap) { + ret = -ENOMEM; goto out; + } dma_mem->virt_base = mem_base; dma_mem->device_base = device_addr; @@ -73,17 +76,13 @@ static bool dma_init_coherent_memory( spin_lock_init(&dma_mem->spinlock); *mem = dma_mem; - return true; + return 0; out: kfree(dma_mem); - if (mem_base) { - if (flags & 
DMA_MEMORY_MAP) - memunmap(mem_base); - else - iounmap(mem_base); - } - return false; + if (mem_base) + memunmap(mem_base); + return ret; } static void dma_release_coherent_memory(struct dma_coherent_mem *mem) @@ -91,10 +90,7 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) if (!mem) return; - if (mem->flags & DMA_MEMORY_MAP) - memunmap(mem->virt_base); - else - iounmap(mem->virt_base); + memunmap(mem->virt_base); kfree(mem->bitmap); kfree(mem); } @@ -109,8 +105,6 @@ static int dma_assign_coherent_memory(struct device *dev, return -EBUSY; dev->dma_mem = mem; - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - return 0; } @@ -118,16 +112,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { struct dma_coherent_mem *mem; + int ret; - if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags, - &mem)) - return 0; - - if (dma_assign_coherent_memory(dev, mem) == 0) - return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO; + ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem); + if (ret) + return ret; - dma_release_coherent_memory(mem); - return 0; + ret = dma_assign_coherent_memory(dev, mem); + if (ret) + dma_release_coherent_memory(mem); + return ret; } EXPORT_SYMBOL(dma_declare_coherent_memory); @@ -165,9 +159,38 @@ void *dma_mark_declared_memory_occupied(struct device *dev, } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); +static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) +{ + int order = get_order(size); + unsigned long flags; + int pageno; + void *ret; + + spin_lock_irqsave(&mem->spinlock, flags); + + if (unlikely(size > (mem->size << PAGE_SHIFT))) + goto err; + + pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); + if (unlikely(pageno < 0)) + goto err; + + /* + * Memory was found in the coherent area. + */ + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + ret = mem->virt_base + (pageno << PAGE_SHIFT); + spin_unlock_irqrestore(&mem->spinlock, flags); + memset(ret, 0, size); + return ret; +err: + spin_unlock_irqrestore(&mem->spinlock, flags); + return NULL; +} + /** - * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area - * + * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool * @dev: device from which we allocate memory * @size: size of requested memory area * @dma_handle: This will be filled with the correct dma handle @@ -180,44 +203,18 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied); * Returns 0 if dma_alloc_coherent should continue with allocating from * generic memory areas, or !0 if dma_alloc_coherent should return @ret. */ -int dma_alloc_from_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret) { struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); - int order = get_order(size); - unsigned long flags; - int pageno; - int dma_memory_map; if (!mem) return 0; - *ret = NULL; - spin_lock_irqsave(&mem->spinlock, flags); - - if (unlikely(size > (mem->size << PAGE_SHIFT))) - goto err; - - pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); - if (unlikely(pageno < 0)) - goto err; - - /* - * Memory was found in the per-device area. 
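A note on the dma_init_coherent_memory() conversion above: dma_declare_coherent_memory() now propagates a 0/-errno result instead of returning the old DMA_MEMORY_MAP/DMA_MEMORY_IO flag value, so callers check it like any other errno. A minimal sketch; the physical address and the my_probe() name are made up for illustration:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int my_probe(struct platform_device *pdev)
{
	int ret;

	/* 0 on success, negative errno on failure. */
	ret = dma_declare_coherent_memory(&pdev->dev, 0x20000000,
					  0x20000000, SZ_1M,
					  DMA_MEMORY_EXCLUSIVE);
	if (ret)
		return ret;

	return 0;
}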
- */ - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); - *ret = mem->virt_base + (pageno << PAGE_SHIFT); - dma_memory_map = (mem->flags & DMA_MEMORY_MAP); - spin_unlock_irqrestore(&mem->spinlock, flags); - if (dma_memory_map) - memset(*ret, 0, size); - else - memset_io(*ret, 0, size); - - return 1; + *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + if (*ret) + return 1; -err: - spin_unlock_irqrestore(&mem->spinlock, flags); /* * In the case where the allocation can not be satisfied from the * per-device area, try to fall back to generic memory if the @@ -225,25 +222,20 @@ err: */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } -EXPORT_SYMBOL(dma_alloc_from_coherent); +EXPORT_SYMBOL(dma_alloc_from_dev_coherent); -/** - * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool - * @dev: device from which the memory was allocated - * @order: the order of pages allocated - * @vaddr: virtual address of allocated pages - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, releases that memory. - * - * Returns 1 if we correctly released the memory, or 0 if - * dma_release_coherent() should proceed with releasing memory from - * generic pools. - */ -int dma_release_from_coherent(struct device *dev, int order, void *vaddr) +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dma_coherent_default_memory, size, + dma_handle); +} +static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) +{ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; @@ -256,28 +248,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) } return 0; } -EXPORT_SYMBOL(dma_release_from_coherent); /** - * dma_mmap_from_coherent() - try to mmap the memory allocated from - * per-device coherent memory pool to userspace + * dma_release_from_dev_coherent() - free memory to device coherent memory pool * @dev: device from which the memory was allocated - * @vma: vm_area for the userspace memory - * @vaddr: cpu address returned by dma_alloc_from_coherent - * @size: size of the memory buffer allocated by dma_alloc_from_coherent - * @ret: result from remap_pfn_range() + * @order: the order of pages allocated + * @vaddr: virtual address of allocated pages * * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, maps that memory to the provided vma. + * coherent memory pool and if so, releases that memory. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. + * Returns 1 if we correctly released the memory, or 0 if the caller should + * proceed with releasing memory from generic pools. 
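The rename above keeps the contract dma_alloc_from_coherent() had: a non-zero return means the per-device pool answered the request and *ret is the final answer. A sketch of a hypothetical architecture allocator; my_generic_alloc() is an assumed fallback, not a real API:

static void *my_dma_alloc(struct device *dev, size_t size,
			  dma_addr_t *dma_handle, gfp_t gfp,
			  unsigned long attrs)
{
	void *ret;

	/* Non-zero: the device pool satisfied (or must satisfy) the
	 * request; ret may still be NULL if DMA_MEMORY_EXCLUSIVE
	 * forbids falling back to generic memory. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
		return ret;

	return my_generic_alloc(dev, size, dma_handle, gfp, attrs);
}

Frees mirror this through dma_release_from_dev_coherent(), with the dma_*_from_global_coherent() variants covering the default pool.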
*/ -int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, - void *vaddr, size_t size, int *ret) +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + return __dma_release_from_coherent(mem, order, vaddr); +} +EXPORT_SYMBOL(dma_release_from_dev_coherent); + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + +static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) +{ if (mem && vaddr >= mem->virt_base && vaddr + size <= (mem->virt_base + (mem->size << PAGE_SHIFT))) { unsigned long off = vma->vm_pgoff; @@ -296,7 +299,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, } return 0; } -EXPORT_SYMBOL(dma_mmap_from_coherent); + +/** + * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool + * @dev: device from which the memory was allocated + * @vma: vm_area for the userspace memory + * @vaddr: cpu address returned by dma_alloc_from_dev_coherent + * @size: size of the memory buffer allocated + * @ret: result from remap_pfn_range() + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, maps that memory to the provided vma. + * + * Returns 1 if we correctly mapped the memory, or 0 if the caller should + * proceed with mapping memory from generic pools. + */ +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); +} +EXPORT_SYMBOL(dma_mmap_from_dev_coherent); + +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, + size_t size, int *ret) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, + vaddr, size, ret); +} /* * Support for reserved memory regions defined in device tree @@ -311,14 +346,17 @@ static struct reserved_mem *dma_reserved_default_memory __initdata; static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) { struct dma_coherent_mem *mem = rmem->priv; + int ret; - if (!mem && - !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, - DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE, - &mem)) { - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return -ENODEV; + if (!mem) { + ret = dma_init_coherent_memory(rmem->base, rmem->base, + rmem->size, + DMA_MEMORY_EXCLUSIVE, &mem); + if (ret) { + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return ret; + } } mem->use_dev_dma_pfn_offset = true; rmem->priv = mem; diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 5096755d185e..e584eddef0a7 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -176,13 +176,10 @@ int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, flags); - if (rc) { + if (!rc) devres_add(dev, res); - rc = 0; - } else { + else devres_free(res); - rc = -ENOMEM; - } return rc; } @@ -235,7 +232,7 @@ 
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b9f907eedbf7..4b57cf5bc81d 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -7,6 +7,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/capability.h> #include <linux/device.h> #include <linux/module.h> @@ -30,7 +32,6 @@ #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> -#include <linux/swait.h> #include <generated/utsrelease.h> @@ -112,13 +113,13 @@ static inline long firmware_loading_timeout(void) * state of the firmware loading. */ struct fw_state { - struct swait_queue_head wq; + struct completion completion; enum fw_status status; }; static void fw_state_init(struct fw_state *fw_st) { - init_swait_queue_head(&fw_st->wq); + init_completion(&fw_st->completion); fw_st->status = FW_STATUS_UNKNOWN; } @@ -131,9 +132,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) { long ret; - ret = swait_event_interruptible_timeout(fw_st->wq, - __fw_state_is_done(READ_ONCE(fw_st->status)), - timeout); + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) return -ENOENT; if (!ret) @@ -148,35 +147,34 @@ static void __fw_state_set(struct fw_state *fw_st, WRITE_ONCE(fw_st->status, status); if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) - swake_up(&fw_st->wq); + complete_all(&fw_st->completion); } #define fw_state_start(fw_st) \ __fw_state_set(fw_st, FW_STATUS_LOADING) #define fw_state_done(fw_st) \ __fw_state_set(fw_st, FW_STATUS_DONE) +#define fw_state_aborted(fw_st) \ + __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_wait(fw_st) \ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) -#ifndef CONFIG_FW_LOADER_USER_HELPER - -#define fw_state_is_aborted(fw_st) false - -#else /* CONFIG_FW_LOADER_USER_HELPER */ - static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) { return fw_st->status == status; } +#define fw_state_is_aborted(fw_st) \ + __fw_state_check(fw_st, FW_STATUS_ABORTED) + +#ifdef CONFIG_FW_LOADER_USER_HELPER + #define fw_state_aborted(fw_st) \ __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_is_done(fw_st) \ __fw_state_check(fw_st, FW_STATUS_DONE) #define fw_state_is_loading(fw_st) \ __fw_state_check(fw_st, FW_STATUS_LOADING) -#define fw_state_is_aborted(fw_st) \ - __fw_state_check(fw_st, FW_STATUS_ABORTED) #define fw_state_wait_timeout(fw_st, timeout) \ __fw_state_wait_common(fw_st, timeout) @@ -260,38 +258,6 @@ static int fw_cache_piggyback_on_request(const char *name); * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); -static bool __enable_firmware = false; - -static void enable_firmware(void) -{ - mutex_lock(&fw_lock); - __enable_firmware = true; - mutex_unlock(&fw_lock); -} - -static void disable_firmware(void) -{ - mutex_lock(&fw_lock); - __enable_firmware = false; - mutex_unlock(&fw_lock); -} - -/* - * When disabled only the built-in firmware and the firmware cache will be - * used to look for firmware. 
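The swait-to-completion conversion in firmware_class.c above follows the standard completion pattern: the loader calls complete_all() once the status reaches DONE or ABORTED, and waiters block killably with a timeout. Condensed from the hunks above (fw_state and FW_STATUS_* come from this file):

static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
	long ret;

	/* >0: completed; 0: timed out; -ERESTARTSYS: task was killed */
	ret = wait_for_completion_killable_timeout(&fw_st->completion,
						   timeout);
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}

Unlike the old swait_event_interruptible_timeout() wait, only a fatal signal can interrupt this, which is what lets _request_firmware_load() map -ERESTARTSYS to -EINTR further down.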
- */ -static bool firmware_enabled(void) -{ - bool enabled = false; - - mutex_lock(&fw_lock); - if (__enable_firmware) - enabled = true; - mutex_unlock(&fw_lock); - - return enabled; -} - static struct firmware_cache fw_cache; static struct firmware_buf *__allocate_fw_buf(const char *fw_name, @@ -335,6 +301,7 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name) return NULL; } +/* Returns 1 for batching firmware requests with the same name */ static int fw_lookup_and_allocate_buf(const char *fw_name, struct firmware_cache *fwc, struct firmware_buf **buf, void *dbuf, @@ -348,6 +315,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name, kref_get(&tmp->ref); spin_unlock(&fwc->lock); *buf = tmp; + pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n"); return 1; } tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size); @@ -1089,9 +1057,12 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, mutex_unlock(&fw_lock); } - if (fw_state_is_aborted(&buf->fw_st)) - retval = -EAGAIN; - else if (buf->is_paged_buf && !buf->data) + if (fw_state_is_aborted(&buf->fw_st)) { + if (retval == -ERESTARTSYS) + retval = -EINTR; + else + retval = -EAGAIN; + } else if (buf->is_paged_buf && !buf->data) retval = -ENOMEM; device_del(f_dev); @@ -1200,6 +1171,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, return 1; /* need to load */ } +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct firmware_buf and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct firmware_buf *buf; + + /* Loaded directly? */ + if (!fw || !fw->priv) + return; + + buf = fw->priv; + if (!fw_state_is_aborted(&buf->fw_st)) + fw_state_aborted(&buf->fw_st); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1221,12 +1214,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name, if (ret <= 0) /* error or already assigned */ goto out; - if (!firmware_enabled()) { - WARN(1, "firmware request while host is not available\n"); - ret = -EHOSTDOWN; - goto out; - } - ret = fw_get_filesystem_firmware(device, fw->priv); if (ret) { if (!(opt_flags & FW_OPT_NO_WARN)) @@ -1243,6 +1230,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, out: if (ret < 0) { + fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } @@ -1736,62 +1724,6 @@ static void device_uncache_fw_images_delay(unsigned long delay) msecs_to_jiffies(delay)); } -/** - * fw_pm_notify - notifier for suspend/resume - * @notify_block: unused - * @mode: mode we are switching to - * @unused: unused - * - * Used to modify the firmware_class state as we move in between states. - * The firmware_class implements a firmware cache to enable device driver - * to fetch firmware upon resume before the root filesystem is ready. We - * disable API calls which do not use the built-in firmware or the firmware - * cache when we know these calls will not work. 
- * - * The inner logic behind all this is a bit complex so it is worth summarizing - * the kernel's own suspend/resume process with context and focus on how this - * can impact the firmware API. - * - * First a review on how we go to suspend:: - * - * pm_suspend() --> enter_state() --> - * sys_sync() - * suspend_prepare() --> - * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...); - * suspend_freeze_processes() --> - * freeze_processes() --> - * __usermodehelper_set_disable_depth(UMH_DISABLED); - * freeze all tasks ... - * freeze_kernel_threads() - * suspend_devices_and_enter() --> - * dpm_suspend_start() --> - * dpm_prepare() - * dpm_suspend() - * suspend_enter() --> - * platform_suspend_prepare() - * dpm_suspend_late() - * freeze_enter() - * syscore_suspend() - * - * When we resume we bail out of a loop from suspend_devices_and_enter() and - * unwind back out to the caller enter_state() where we were before as follows:: - * - * enter_state() --> - * suspend_devices_and_enter() --> (bail from loop) - * dpm_resume_end() --> - * dpm_resume() - * dpm_complete() - * suspend_finish() --> - * suspend_thaw_processes() --> - * thaw_processes() --> - * __usermodehelper_set_disable_depth(UMH_FREEZING); - * thaw_workqueues(); - * thaw all processes ... - * usermodehelper_enable(); - * pm_notifier_call_chain(PM_POST_SUSPEND); - * - * fw_pm_notify() works through pm_notifier_call_chain(). - */ static int fw_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { @@ -1805,7 +1737,6 @@ static int fw_pm_notify(struct notifier_block *notify_block, */ kill_pending_fw_fallback_reqs(true); device_cache_fw_images(); - disable_firmware(); break; case PM_POST_SUSPEND: @@ -1818,7 +1749,6 @@ static int fw_pm_notify(struct notifier_block *notify_block, mutex_lock(&fw_lock); fw_cache.state = FW_LOADER_NO_CACHE; mutex_unlock(&fw_lock); - enable_firmware(); device_uncache_fw_images_delay(10 * MSEC_PER_SEC); break; @@ -1867,7 +1797,6 @@ static void __init fw_cache_init(void) static int fw_shutdown_notify(struct notifier_block *unused1, unsigned long unused2, void *unused3) { - disable_firmware(); /* * Kill all pending fallback requests to avoid both stalling shutdown, * and avoid a deadlock with the usermode_lock. 
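From a driver's point of view, removing the enable/disable gate means firmware requests around suspend/resume no longer fail with -EHOSTDOWN; they are served from the firmware cache populated by device_cache_fw_images() above. A hypothetical resume handler (the blob name is illustrative):

#include <linux/firmware.h>

static int my_resume(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Served from the fw cache during early resume. */
	err = request_firmware(&fw, "my_device.bin", dev);
	if (err)	/* now possibly -EINTR if the caller was killed */
		return err;

	/* ... reprogram the device from fw->data / fw->size ... */
	release_firmware(fw);
	return 0;
}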
@@ -1883,7 +1812,6 @@ static struct notifier_block fw_shutdown_nb = { static int __init firmware_class_init(void) { - enable_firmware(); fw_cache_init(); register_reboot_notifier(&fw_shutdown_nb); #ifdef CONFIG_FW_LOADER_USER_HELPER @@ -1895,7 +1823,6 @@ static int __init firmware_class_init(void) static void __exit firmware_class_exit(void) { - disable_firmware(); #ifdef CONFIG_PM_SLEEP unregister_syscore_ops(&fw_syscore_ops); unregister_pm_notifier(&fw_cache.pm_notify); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c7c4e0325cdb..4e3b61cda520 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -388,6 +388,19 @@ static ssize_t show_phys_device(struct device *dev, } #ifdef CONFIG_MEMORY_HOTREMOVE +static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn, + unsigned long nr_pages, int online_type, + struct zone *default_zone) +{ + struct zone *zone; + + zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); + if (zone != default_zone) { + strcat(buf, " "); + strcat(buf, zone->name); + } +} + static ssize_t show_valid_zones(struct device *dev, struct device_attribute *attr, char *buf) { @@ -395,7 +408,7 @@ static ssize_t show_valid_zones(struct device *dev, unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; unsigned long valid_start_pfn, valid_end_pfn; - bool append = false; + struct zone *default_zone; int nid; /* @@ -418,16 +431,13 @@ static ssize_t show_valid_zones(struct device *dev, } nid = pfn_to_nid(start_pfn); - if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) { - strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name); - append = true; - } + default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); + strcat(buf, default_zone->name); - if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) { - if (append) - strcat(buf, " "); - strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name); - } + print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, + default_zone); + print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE, + default_zone); out: strcat(buf, "\n"); diff --git a/drivers/base/node.c b/drivers/base/node.c index d8dc83017d8d..aae2402f3791 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -27,13 +27,21 @@ static struct bus_type node_subsys = { static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) { + ssize_t n; + cpumask_var_t mask; struct node *node_dev = to_node(dev); - const struct cpumask *mask = cpumask_of_node(node_dev->dev.id); /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. 
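The show_valid_zones() rework in memory.c above always prints the default zone first, then appends any alternative zones via print_allowed_zone(). A userspace sketch that reads the result; the memory block number is illustrative:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/memory/memory32/valid_zones",
			"r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("%s", line);	/* e.g. "Normal Movable" */
	fclose(f);
	return 0;
}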
*/ BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); - return cpumap_print_to_pagebuf(list, buf, mask); + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return 0; + + cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask); + n = cpumap_print_to_pagebuf(list, buf, mask); + free_cpumask_var(mask); + + return n; } static inline ssize_t node_read_cpumask(struct device *dev, @@ -160,12 +168,12 @@ static ssize_t node_read_numastat(struct device *dev, "interleave_hit %lu\n" "local_node %lu\n" "other_node %lu\n", - sum_zone_node_page_state(dev->id, NUMA_HIT), - sum_zone_node_page_state(dev->id, NUMA_MISS), - sum_zone_node_page_state(dev->id, NUMA_FOREIGN), - sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT), - sum_zone_node_page_state(dev->id, NUMA_LOCAL), - sum_zone_node_page_state(dev->id, NUMA_OTHER)); + sum_zone_numa_state(dev->id, NUMA_HIT), + sum_zone_numa_state(dev->id, NUMA_MISS), + sum_zone_numa_state(dev->id, NUMA_FOREIGN), + sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT), + sum_zone_numa_state(dev->id, NUMA_LOCAL), + sum_zone_numa_state(dev->id, NUMA_OTHER)); } static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); @@ -181,9 +189,17 @@ static ssize_t node_read_vmstat(struct device *dev, n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], sum_zone_node_page_state(nid, i)); - for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) +#ifdef CONFIG_NUMA + for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) n += sprintf(buf+n, "%s %lu\n", vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], + sum_zone_numa_state(nid, i)); +#endif + + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + n += sprintf(buf+n, "%s %lu\n", + vmstat_text[i + NR_VM_ZONE_STAT_ITEMS + + NR_VM_NUMA_STAT_ITEMS], node_page_state(pgdat, i)); return n; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index d1bd99271066..9045c5f3734e 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev, struct platform_device *pdev = to_platform_device(dev); char *driver_override, *old, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 3b8210ebb50e..e8ca5e2cf1e5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } +#ifdef CONFIG_DEBUG_FS +static void genpd_update_accounting(struct generic_pm_domain *genpd) +{ + ktime_t delta, now; + + now = ktime_get(); + delta = ktime_sub(now, genpd->accounting_time); + + /* + * If genpd->status is active, it means we are just + * out of off and so update the idle time and vice + * versa. 
+ */ + if (genpd->status == GPD_STATE_ACTIVE) { + int state_idx = genpd->state_idx; + + genpd->states[state_idx].idle_time = + ktime_add(genpd->states[state_idx].idle_time, delta); + } else { + genpd->on_time = ktime_add(genpd->on_time, delta); + } + + genpd->accounting_time = now; +} +#else +static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} +#endif + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, } genpd->status = GPD_STATE_POWER_OFF; + genpd_update_accounting(genpd); list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) goto err; genpd->status = GPD_STATE_ACTIVE; + genpd_update_accounting(genpd); + return 0; err: @@ -1222,8 +1253,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, spin_unlock_irq(&dev->power.lock); - dev_pm_domain_set(dev, &genpd->domain); - return gpd_data; err_free: @@ -1237,8 +1266,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, static void genpd_free_dev_data(struct device *dev, struct generic_pm_domain_data *gpd_data) { - dev_pm_domain_set(dev, NULL); - spin_lock_irq(&dev->power.lock); dev->power.subsys_data->domain_data = NULL; @@ -1275,6 +1302,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (ret) goto out; + dev_pm_domain_set(dev, &genpd->domain); + genpd->device_count++; genpd->max_off_time_changed = true; @@ -1336,6 +1365,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, if (genpd->detach_dev) genpd->detach_dev(genpd, dev); + dev_pm_domain_set(dev, NULL); + list_del_init(&pdd->list_node); genpd_unlock(genpd); @@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->provider = NULL; genpd->has_provider = false; + genpd->accounting_time = ktime_get(); genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; @@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, mutex_lock(&of_genpd_mutex); list_add(&cp->link, &of_genpd_providers); mutex_unlock(&of_genpd_mutex); - pr_debug("Added domain provider from %s\n", np->full_name); + pr_debug("Added domain provider from %pOF\n", np); return 0; } @@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } @@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn, ret = genpd_parse_state(&st[i++], np); if (ret) { pr_err - ("Parsing idle state node %s failed with err %d\n", - np->full_name, ret); + ("Parsing idle state node %pOF failed with err %d\n", + np, ret); of_node_put(np); kfree(st); return ret; @@ -2327,7 
+2359,7 @@ exit: return 0; } -static int pm_genpd_summary_show(struct seq_file *s, void *data) +static int genpd_summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int pm_genpd_summary_open(struct inode *inode, struct file *file) +static int genpd_status_show(struct seq_file *s, void *data) { - return single_open(file, pm_genpd_summary_show, NULL); + static const char * const status_lookup[] = { + [GPD_STATE_ACTIVE] = "on", + [GPD_STATE_POWER_OFF] = "off" + }; + + struct generic_pm_domain *genpd = s->private; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + + if (genpd->status == GPD_STATE_POWER_OFF) + seq_printf(s, "%s-%u\n", status_lookup[genpd->status], + genpd->state_idx); + else + seq_printf(s, "%s\n", status_lookup[genpd->status]); +exit: + genpd_unlock(genpd); + return ret; } -static const struct file_operations pm_genpd_summary_fops = { - .open = pm_genpd_summary_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +static int genpd_sub_domains_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct gpd_link *link; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(link, &genpd->master_links, master_node) + seq_printf(s, "%s\n", link->slave->name); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_idle_states_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + seq_puts(s, "State Time Spent(ms)\n"); + + for (i = 0; i < genpd->state_count; i++) { + ktime_t delta = 0; + s64 msecs; + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + msecs = ktime_to_ms( + ktime_add(genpd->states[i].idle_time, delta)); + seq_printf(s, "S%-13i %lld\n", i, msecs); + } + + genpd_unlock(genpd); + return ret; +} + +static int genpd_active_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (genpd->status == GPD_STATE_ACTIVE) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + seq_printf(s, "%lld ms\n", ktime_to_ms( + ktime_add(genpd->on_time, delta))); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_total_idle_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0, total = 0; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + for (i = 0; i < genpd->state_count; i++) { + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + total = ktime_add(total, genpd->states[i].idle_time); + } + total = ktime_add(total, delta); + + seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + + genpd_unlock(genpd); + return ret; +} + + +static int genpd_devices_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct pm_domain_data *pm_data; + const char 
*kobj_path; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? + GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "%s\n", kobj_path); + kfree(kobj_path); + } + + genpd_unlock(genpd); + return ret; +} + +#define define_genpd_open_function(name) \ +static int genpd_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, genpd_##name##_show, inode->i_private); \ +} + +define_genpd_open_function(summary); +define_genpd_open_function(status); +define_genpd_open_function(sub_domains); +define_genpd_open_function(idle_states); +define_genpd_open_function(active_time); +define_genpd_open_function(total_idle_time); +define_genpd_open_function(devices); + +#define define_genpd_debugfs_fops(name) \ +static const struct file_operations genpd_##name##_fops = { \ + .open = genpd_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +define_genpd_debugfs_fops(summary); +define_genpd_debugfs_fops(status); +define_genpd_debugfs_fops(sub_domains); +define_genpd_debugfs_fops(idle_states); +define_genpd_debugfs_fops(active_time); +define_genpd_debugfs_fops(total_idle_time); +define_genpd_debugfs_fops(devices); static int __init pm_genpd_debug_init(void) { struct dentry *d; + struct generic_pm_domain *genpd; pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); @@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); + pm_genpd_debugfs_dir, NULL, &genpd_summary_fops); if (!d) return -ENOMEM; + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir); + if (!d) + return -ENOMEM; + + debugfs_create_file("current_state", 0444, + d, genpd, &genpd_status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &genpd_sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &genpd_idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &genpd_active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &genpd_total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, &genpd_devices_fops); + } + return 0; } late_initcall(pm_genpd_debug_init); diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 281f949c5ffe..51751cc8c9e6 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -14,23 +14,20 @@ static int dev_update_qos_constraint(struct device *dev, void *data) { s64 *constraint_ns_p = data; - s32 constraint_ns = -1; + s64 constraint_ns = -1; if (dev->power.subsys_data && dev->power.subsys_data->domain_data) constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; - if (constraint_ns < 0) { + if (constraint_ns < 0) constraint_ns = dev_pm_qos_read_value(dev); - constraint_ns *= NSEC_PER_USEC; - } - if (constraint_ns == 0) + + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) return 0; - /* - * constraint_ns cannot be negative here, because the device has been - * suspended. 
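The per-domain debugfs entries created above land under /sys/kernel/debug/pm_genpd/<domain>/. A small userspace sketch that dumps them; the domain name is illustrative and debugfs must be mounted:

#include <stdio.h>

static void dump(const char *domain, const char *file)
{
	char path[256], buf[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/pm_genpd/%s/%s",
		 domain, file);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(buf, sizeof(buf), f))
		printf("%s/%s: %s", domain, file, buf);
	fclose(f);
}

int main(void)
{
	const char *files[] = { "current_state", "active_time",
				"total_idle_time", "idle_states", "devices" };
	unsigned int i;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++)
		dump("power-domain0", files[i]);
	return 0;
}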
- */ - if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0) + constraint_ns *= NSEC_PER_USEC; + + if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0) *constraint_ns_p = constraint_ns; return 0; @@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev) spin_unlock_irqrestore(&dev->power.lock, flags); - if (constraint_ns < 0) + if (constraint_ns == 0) return false; - constraint_ns *= NSEC_PER_USEC; + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + constraint_ns = -1; + else + constraint_ns *= NSEC_PER_USEC; + /* * We can walk the children without any additional locking, because * they all have been suspended at this point and their @@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev) device_for_each_child(dev, &constraint_ns, dev_update_qos_constraint); - if (constraint_ns > 0) { - constraint_ns -= td->suspend_latency_ns + - td->resume_latency_ns; - if (constraint_ns == 0) - return false; + if (constraint_ns < 0) { + /* The children have no constraints. */ + td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + td->cached_suspend_ok = true; + } else { + constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns; + if (constraint_ns > 0) { + td->effective_constraint_ns = constraint_ns; + td->cached_suspend_ok = true; + } else { + td->effective_constraint_ns = 0; + } } - td->effective_constraint_ns = constraint_ns; - td->cached_suspend_ok = constraint_ns >= 0; /* * The children have been suspended already, so we don't need to take @@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, td = &to_gpd_data(pdd)->td; constraint_ns = td->effective_constraint_ns; /* default_suspend_ok() need not be called before us. */ - if (constraint_ns < 0) { + if (constraint_ns < 0) constraint_ns = dev_pm_qos_read_value(pdd->dev); - constraint_ns *= NSEC_PER_USEC; - } - if (constraint_ns == 0) + + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) continue; + constraint_ns *= NSEC_PER_USEC; + /* * constraint_ns cannot be negative here, because the device has * been suspended. diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c99f8730de82..770b1539a083 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, dev_name(dev), pm_verb(state.event), info, error); } -#ifdef CONFIG_PM_DEBUG -static void dpm_show_time(ktime_t starttime, pm_message_t state, +static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, const char *info) { ktime_t calltime; @@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, usecs = usecs64; if (usecs == 0) usecs = 1; - pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", - info ?: "", info ? " " : "", pm_verb(state.event), - usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); + + pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + error ? 
"aborted" : "complete", + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } -#else -static inline void dpm_show_time(ktime_t starttime, pm_message_t state, - const char *info) {} -#endif /* CONFIG_PM_DEBUG */ static int dpm_run_callback(pm_callback_t cb, struct device *dev, pm_message_t state, const char *info) @@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie) put_device(dev); } -/** - * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Call the "noirq" resume handlers for all devices in dpm_noirq_list and - * enable device drivers to receive interrupts. - */ -void dpm_resume_noirq(pm_message_t state) +void dpm_noirq_resume_devices(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); @@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "noirq"); + dpm_show_time(starttime, state, 0, "noirq"); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +void dpm_noirq_end(void) +{ resume_device_irqs(); device_wakeup_disarm_wake_irqs(); cpuidle_resume(); - trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +/** + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and + * allow device drivers' interrupt handlers to be called. + */ +void dpm_resume_noirq(pm_message_t state) +{ + dpm_noirq_resume_devices(state); + dpm_noirq_end(); } /** @@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "early"); + dpm_show_time(starttime, state, 0, "early"); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); } @@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, NULL); + dpm_show_time(starttime, state, 0, NULL); cpufreq_resume(); trace_suspend_resume(TPS("dpm_resume"), state.event, false); @@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore || dev->power.direct_complete) goto Complete; @@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev) return __device_suspend_noirq(dev, pm_transition, false); } -/** - * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Prevent device drivers from receiving interrupts and call the "noirq" suspend - * handlers for all non-sysdev devices. 
- */ -int dpm_suspend_noirq(pm_message_t state) +void dpm_noirq_begin(void) +{ + cpuidle_pause(); + device_wakeup_arm_wake_irqs(); + suspend_device_irqs(); +} + +int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); - cpuidle_pause(); - device_wakeup_arm_wake_irqs(); - suspend_device_irqs(); mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -1208,15 +1217,32 @@ int dpm_suspend_noirq(pm_message_t state) if (error) { suspend_stats.failed_suspend_noirq++; dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); - dpm_resume_noirq(resume_event(state)); - } else { - dpm_show_time(starttime, state, "noirq"); } + dpm_show_time(starttime, state, error, "noirq"); trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); return error; } /** + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Prevent device drivers' interrupt handlers from being called and invoke + * "noirq" suspend callbacks for all non-sysdev devices. + */ +int dpm_suspend_noirq(pm_message_t state) +{ + int ret; + + dpm_noirq_begin(); + ret = dpm_noirq_suspend_devices(state); + if (ret) + dpm_resume_noirq(resume_event(state)); + + return ret; +} + +/** * device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. @@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state) suspend_stats.failed_suspend_late++; dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - } else { - dpm_show_time(starttime, state, "late"); } + dpm_show_time(starttime, state, error, "late"); trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); return error; } @@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state) if (error) { suspend_stats.failed_suspend++; dpm_save_failed_step(SUSPEND_SUSPEND); - } else - dpm_show_time(starttime, state, NULL); + } + dpm_show_time(starttime, state, error, NULL); trace_suspend_resume(TPS("dpm_suspend"), state.event, false); return error; } @@ -1835,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.no_pm_callbacks = - (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && - (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && + !dev->bus->suspend && !dev->bus->resume)) && + (!dev->class || (pm_ops_is_empty(dev->class->pm) && + !dev->class->suspend && !dev->class->resume)) && (!dev->type || pm_ops_is_empty(dev->type->pm)) && (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && - (!dev->driver || pm_ops_is_empty(dev->driver->pm)); + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && + !dev->driver->suspend && !dev->driver->resume)); spin_unlock_irq(&dev->power.lock); } diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index a8cc14fd8ae4..a6de32530693 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, opp->available = availability_req; + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + /* Notify the change of the OPP availability */ if (availability_req) blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, @@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned 
long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); + dev_pm_opp_put(opp); + goto put_table; + unlock: mutex_unlock(&opp_table->lock); +put_table: dev_pm_opp_put_opp_table(opp_table); return r; } diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 57eec1ca0569..0b718886479b 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); -/* Returns opp descriptor node for a device, caller must do of_node_put() */ -struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +/* Returns opp descriptor node for a device node, caller must + * do of_node_put() */ +static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) { /* * There should be only ONE phandle present in "operating-points-v2" * property. */ - return of_parse_phandle(dev->of_node, "operating-points-v2", 0); + return of_parse_phandle(np, "operating-points-v2", 0); +} + +/* Returns opp descriptor node for a device, caller must do of_node_put() */ +struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +{ + return _opp_of_get_opp_desc_node(dev->of_node); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); @@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) ret = dev_pm_opp_of_add_table(cpu_dev); if (ret) { - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", - __func__, cpu, ret); + /* + * OPP may get registered dynamically, don't print error + * message here. + */ + pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); /* Free all other OPPs */ dev_pm_opp_of_cpumask_remove_table(cpumask); @@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - struct device_node *np, *tmp_np; - struct device *tcpu_dev; + struct device_node *np, *tmp_np, *cpu_np; int cpu, ret = 0; /* Get OPP descriptor node */ @@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, if (cpu == cpu_dev->id) continue; - tcpu_dev = get_cpu_device(cpu); - if (!tcpu_dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + cpu_np = of_get_cpu_node(cpu, NULL); + if (!cpu_np) { + dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu); - ret = -ENODEV; + ret = -ENOENT; goto put_cpu_node; } /* Get OPP descriptor node */ - tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev); + tmp_np = _opp_of_get_opp_desc_node(cpu_np); if (!tmp_np) { - dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", - __func__); + pr_err("%pOF: Couldn't find opp node\n", cpu_np); ret = -ENOENT; goto put_cpu_node; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index f850daeffba4..7d29286d9313 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) plist_head_init(&c->list); c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; - c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; c->type = PM_QOS_MIN; c->notifiers = n; @@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) mutex_unlock(&dev_pm_qos_sysfs_mtx); } -static bool dev_pm_qos_invalid_request(struct device *dev, - struct dev_pm_qos_request 
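
The _opp_set_availability() change above drops opp_table->lock and holds a reference on the OPP (dev_pm_opp_get()/dev_pm_opp_put()) while the notifier chain runs. A hedged sketch of a consumer of those notifications; dev_pm_opp_register_notifier() is part of the existing OPP API, and the handler name is an assumption:

#include <linux/pm_opp.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical listener for OPP enable/disable events. The OPP passed
 * as @data is only guaranteed valid for the duration of the call. */
static int my_opp_notifier(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct dev_pm_opp *opp = data;

	if (event == OPP_EVENT_ENABLE || event == OPP_EVENT_DISABLE)
		pr_debug("OPP %lu Hz %sabled\n", dev_pm_opp_get_freq(opp),
			 event == OPP_EVENT_ENABLE ? "en" : "dis");

	return NOTIFY_OK;
}

static struct notifier_block my_opp_nb = { .notifier_call = my_opp_notifier };
/* Registered elsewhere with: dev_pm_opp_register_notifier(dev, &my_opp_nb); */
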
*req) +static bool dev_pm_qos_invalid_req_type(struct device *dev, + enum dev_pm_qos_req_type type) { - return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE - && !dev->power.set_latency_tolerance); + return type == DEV_PM_QOS_LATENCY_TOLERANCE && + !dev->power.set_latency_tolerance; } static int __dev_pm_qos_add_request(struct device *dev, @@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev, { int ret = 0; - if (!dev || dev_pm_qos_invalid_request(dev, req)) + if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) return -EINVAL; if (WARN(dev_pm_qos_request_active(req), diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..13e015905543 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev) || (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; - else if (__dev_pm_qos_read_value(dev) < 0) + else if (__dev_pm_qos_read_value(dev) == 0) retval = -EPERM; else if (dev->power.runtime_status == RPM_SUSPENDED) retval = 1; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 156ab57bca77..632077f05c57 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); + s32 value = dev_pm_qos_requested_resume_latency(dev); + + if (value == 0) + return sprintf(buf, "n/a\n"); + else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + value = 0; + + return sprintf(buf, "%d\n", value); } static ssize_t pm_qos_resume_latency_store(struct device *dev, @@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, s32 value; int ret; - if (kstrtos32(buf, 0, &value)) - return -EINVAL; + if (!kstrtos32(buf, 0, &value)) { + /* + * Prevent users from writing negative or "no constraint" values + * directly. 
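
After this rework, a resume-latency value of 0 stored in the framework means "this device must not be suspended at all" (see the -EPERM check in rpm_check_suspend_allowed() above), while PM_QOS_RESUME_LATENCY_NO_CONSTRAINT means "no requirement". A minimal driver-side sketch using the existing dev_pm_qos API; the request variable and the 100 us figure are illustrative assumptions:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_latency_req;

/* Ask that resume of @dev take no more than 100 us; passing 0 here
 * would now mean "do not runtime-suspend this device at all". */
static int my_limit_resume_latency(struct device *dev)
{
	return dev_pm_qos_add_request(dev, &my_latency_req,
				      DEV_PM_QOS_RESUME_LATENCY, 100);
}
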
+ */ + if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + return -EINVAL; - if (value < 0) + if (value == 0) + value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) { + value = 0; + } else { return -EINVAL; + } ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, value); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..cdd6f256da59 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; + dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { - if (wakeup_sysfs_add(dev)) - return; + int ret = wakeup_sysfs_add(dev); + + if (ret) + dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } - dev->power.can_wakeup = capable; } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); @@ -863,7 +865,7 @@ bool pm_wakeup_pending(void) void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); - freeze_wake(); + s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); diff --git a/drivers/base/property.c b/drivers/base/property.c index edf02c1b5845..7ed99c1b2a8b 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -21,23 +21,30 @@ #include <linux/phy.h> struct property_set { + struct device *dev; struct fwnode_handle fwnode; const struct property_entry *properties; }; -static inline bool is_pset_node(struct fwnode_handle *fwnode) -{ - return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA; -} +static const struct fwnode_operations pset_fwnode_ops; -static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) +static inline bool is_pset_node(const struct fwnode_handle *fwnode) { - return is_pset_node(fwnode) ? - container_of(fwnode, struct property_set, fwnode) : NULL; + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops; } -static const struct property_entry *pset_prop_get(struct property_set *pset, - const char *name) +#define to_pset_node(__fwnode) \ + ({ \ + typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \ + \ + is_pset_node(__to_pset_node_fwnode) ? 
\ + container_of(__to_pset_node_fwnode, \ + struct property_set, fwnode) : \ + NULL; \ + }) + +static const struct property_entry * +pset_prop_get(const struct property_set *pset, const char *name) { const struct property_entry *prop; @@ -51,7 +58,7 @@ static const struct property_entry *pset_prop_get(struct property_set *pset, return NULL; } -static const void *pset_prop_find(struct property_set *pset, +static const void *pset_prop_find(const struct property_set *pset, const char *propname, size_t length) { const struct property_entry *prop; @@ -71,7 +78,7 @@ static const void *pset_prop_find(struct property_set *pset, return pointer; } -static int pset_prop_read_u8_array(struct property_set *pset, +static int pset_prop_read_u8_array(const struct property_set *pset, const char *propname, u8 *values, size_t nval) { @@ -86,7 +93,7 @@ static int pset_prop_read_u8_array(struct property_set *pset, return 0; } -static int pset_prop_read_u16_array(struct property_set *pset, +static int pset_prop_read_u16_array(const struct property_set *pset, const char *propname, u16 *values, size_t nval) { @@ -101,7 +108,7 @@ static int pset_prop_read_u16_array(struct property_set *pset, return 0; } -static int pset_prop_read_u32_array(struct property_set *pset, +static int pset_prop_read_u32_array(const struct property_set *pset, const char *propname, u32 *values, size_t nval) { @@ -116,7 +123,7 @@ static int pset_prop_read_u32_array(struct property_set *pset, return 0; } -static int pset_prop_read_u64_array(struct property_set *pset, +static int pset_prop_read_u64_array(const struct property_set *pset, const char *propname, u64 *values, size_t nval) { @@ -131,7 +138,7 @@ static int pset_prop_read_u64_array(struct property_set *pset, return 0; } -static int pset_prop_count_elems_of_size(struct property_set *pset, +static int pset_prop_count_elems_of_size(const struct property_set *pset, const char *propname, size_t length) { const struct property_entry *prop; @@ -143,7 +150,7 @@ static int pset_prop_count_elems_of_size(struct property_set *pset, return prop->length / length; } -static int pset_prop_read_string_array(struct property_set *pset, +static int pset_prop_read_string_array(const struct property_set *pset, const char *propname, const char **strings, size_t nval) { @@ -187,18 +194,18 @@ struct fwnode_handle *dev_fwnode(struct device *dev) } EXPORT_SYMBOL_GPL(dev_fwnode); -static bool pset_fwnode_property_present(struct fwnode_handle *fwnode, +static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) { return !!pset_prop_get(to_pset_node(fwnode), propname); } -static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode, +static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval) { - struct property_set *node = to_pset_node(fwnode); + const struct property_set *node = to_pset_node(fwnode); if (!val) return pset_prop_count_elems_of_size(node, propname, elem_size); @@ -217,9 +224,10 @@ static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode, return -ENXIO; } -static int pset_fwnode_property_read_string_array(struct fwnode_handle *fwnode, - const char *propname, - const char **val, size_t nval) +static int +pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) { return pset_prop_read_string_array(to_pset_node(fwnode), propname, val, nval); @@ -249,7 +257,8 @@ 
EXPORT_SYMBOL_GPL(device_property_present); * @fwnode: Firmware node whose property to check * @propname: Name of the property */ -bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) +bool fwnode_property_present(const struct fwnode_handle *fwnode, + const char *propname) { bool ret; @@ -431,7 +440,7 @@ int device_property_match_string(struct device *dev, const char *propname, } EXPORT_SYMBOL_GPL(device_property_match_string); -static int fwnode_property_read_int_array(struct fwnode_handle *fwnode, +static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval) @@ -467,7 +476,7 @@ static int fwnode_property_read_int_array(struct fwnode_handle *fwnode, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u8), @@ -493,7 +502,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, const char *propname, u16 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u16), @@ -519,7 +528,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u32), @@ -545,7 +554,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, +int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u64), @@ -571,7 +580,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_string_array(struct fwnode_handle *fwnode, +int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { @@ -603,7 +612,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); * %-EPROTO or %-EILSEQ if the property is not a string, * %-ENXIO if no suitable firmware interface is present. */ -int fwnode_property_read_string(struct fwnode_handle *fwnode, +int fwnode_property_read_string(const struct fwnode_handle *fwnode, const char *propname, const char **val) { int ret = fwnode_property_read_string_array(fwnode, propname, val, 1); @@ -627,7 +636,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string); * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present. 
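
The accessors above now take const fwnode handles, so they can be used from const contexts without casts. A short sketch of typical reads; the property names "clock-rates" and "label" are made up for illustration:

#include <linux/property.h>

/* Hypothetical: read a fixed-size u32 array and a string property. */
static int my_read_props(const struct fwnode_handle *fwnode)
{
	u32 rates[3];
	const char *label;
	int ret;

	ret = fwnode_property_read_u32_array(fwnode, "clock-rates", rates, 3);
	if (ret)
		return ret;

	return fwnode_property_read_string(fwnode, "label", &label);
}
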
*/ -int fwnode_property_match_string(struct fwnode_handle *fwnode, +int fwnode_property_match_string(const struct fwnode_handle *fwnode, const char *propname, const char *string) { const char **values; @@ -657,6 +666,38 @@ out: } EXPORT_SYMBOL_GPL(fwnode_property_match_string); +/** + * fwnode_property_get_reference_args() - Find a reference with arguments + * @fwnode: Firmware node where to look for the reference + * @prop: The name of the property + * @nargs_prop: The name of the property telling the number of + * arguments in the referred node. NULL if @nargs is known, + * otherwise @nargs is ignored. Only relevant on OF. + * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL. + * @index: Index of the reference, from zero onwards. + * @args: Result structure with reference and integer arguments. + * + * Obtain a reference based on a named property in an fwnode, with + * integer arguments. + * + * Caller is responsible to call fwnode_handle_put() on the returned + * args->fwnode pointer. + * + * Returns: %0 on success + * %-ENOENT when the index is out of bounds, the index has an empty + * reference or the property was not found + * %-EINVAL on parse error + */ +int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args) +{ + return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop, + nargs, index, args); +} +EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); + static int property_copy_string_array(struct property_entry *dst, const struct property_entry *src) { @@ -855,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset) void device_remove_properties(struct device *dev) { struct fwnode_handle *fwnode; + struct property_set *pset; fwnode = dev_fwnode(dev); if (!fwnode) @@ -864,16 +906,16 @@ void device_remove_properties(struct device *dev) * the pset. If there is no real firmware node (ACPI/DT) primary * will hold the pset. */ - if (is_pset_node(fwnode)) { + pset = to_pset_node(fwnode); + if (pset) { set_primary_fwnode(dev, NULL); - pset_free_set(to_pset_node(fwnode)); } else { - fwnode = fwnode->secondary; - if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { + pset = to_pset_node(fwnode->secondary); + if (pset && dev == pset->dev) set_secondary_fwnode(dev, NULL); - pset_free_set(to_pset_node(fwnode)); - } } + if (pset && dev == pset->dev) + pset_free_set(pset); } EXPORT_SYMBOL_GPL(device_remove_properties); @@ -900,9 +942,9 @@ int device_add_properties(struct device *dev, if (IS_ERR(p)) return PTR_ERR(p); - p->fwnode.type = FWNODE_PDATA; p->fwnode.ops = &pset_fwnode_ops; set_secondary_fwnode(dev, &p->fwnode); + p->dev = dev; return 0; } EXPORT_SYMBOL_GPL(device_add_properties); @@ -935,7 +977,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent); * Return parent firmware node of the given node if possible or %NULL if no * parent was available. */ -struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode) +struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode) { return fwnode_call_ptr_op(fwnode, get_parent); } @@ -946,8 +988,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent); * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. 
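
A hedged usage sketch for fwnode_property_get_reference_args() as documented above; the "io-channels"/"#io-channel-cells" names are conventional binding examples, not something this code mandates:

#include <linux/property.h>

/* Hypothetical: resolve reference 0 of "io-channels = <&adc N>". */
static int my_get_channel(const struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0, 0,
						 &args);
	if (ret)
		return ret;

	/* args.fwnode holds a reference; args.args[0] is the channel index. */
	fwnode_handle_put(args.fwnode);
	return 0;
}
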
*/ -struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode, - struct fwnode_handle *child) +struct fwnode_handle * +fwnode_get_next_child_node(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) { return fwnode_call_ptr_op(fwnode, get_next_child_node, child); } @@ -978,8 +1021,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node); * @fwnode: Firmware node to find the named child node for. * @childname: String to match child node name against. */ -struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode, - const char *childname) +struct fwnode_handle * +fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) { return fwnode_call_ptr_op(fwnode, get_named_child_node, childname); } @@ -1025,7 +1069,7 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put); * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. */ -bool fwnode_device_is_available(struct fwnode_handle *fwnode) +bool fwnode_device_is_available(const struct fwnode_handle *fwnode) { return fwnode_call_bool_op(fwnode, device_is_available); } @@ -1163,7 +1207,7 @@ EXPORT_SYMBOL(device_get_mac_address); * are available. */ struct fwnode_handle * -fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, +fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev); @@ -1177,7 +1221,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); * Return: the firmware node of the device the @endpoint belongs to. */ struct fwnode_handle * -fwnode_graph_get_port_parent(struct fwnode_handle *endpoint) +fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint) { struct fwnode_handle *port, *parent; @@ -1197,7 +1241,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent); * Extracts firmware node of a remote device the @fwnode points to. */ struct fwnode_handle * -fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode) +fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint, *parent; @@ -1216,7 +1260,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent); * * Extracts firmware node of a remote port the @fwnode points to. */ -struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode) +struct fwnode_handle * +fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode) { return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode)); } @@ -1229,7 +1274,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); * Extracts firmware node of a remote endpoint the @fwnode points to. */ struct fwnode_handle * -fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) { return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint); } @@ -1244,8 +1289,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); * Return: Remote fwnode handle associated with remote endpoint node linked * to @node. Use fwnode_node_put() on it when done. */ -struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode, - u32 port_id, u32 endpoint_id) +struct fwnode_handle * +fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id, + u32 endpoint_id) { struct fwnode_handle *endpoint = NULL; @@ -1281,7 +1327,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node); * information in @endpoint. The caller must hold a reference to * @fwnode. 
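
The graph accessors get the same const conversion. A sketch of walking every endpoint of a node and looking up its remote port parent; purely illustrative, with no particular binding assumed:

#include <linux/property.h>

/* With OF-backed nodes, fwnode_graph_get_next_endpoint() drops the
 * reference on @prev, so only the remote handles need a put here. */
static void my_walk_endpoints(const struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ep = NULL;

	while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
		struct fwnode_handle *remote;

		remote = fwnode_graph_get_remote_port_parent(ep);
		if (remote)
			fwnode_handle_put(remote);
	}
}
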
 */
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 				struct fwnode_endpoint *endpoint)
 {
 	memset(endpoint, 0, sizeof(*endpoint));
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 073c0b77e5b3..e7fa7b4bf9af 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -5,6 +5,7 @@ config REGMAP
 	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
 	select IRQ_DOMAIN if REGMAP_IRQ
+	select REGMAP_HWSPINLOCK if HWSPINLOCK=y
 	bool
 
 config REGCACHE_COMPRESSED
@@ -36,3 +37,6 @@ config REGMAP_MMIO
 
 config REGMAP_IRQ
 	bool
+
+config REGMAP_HWSPINLOCK
+	bool
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 2a4435d76028..8641183cac2f 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -157,6 +157,8 @@ struct regmap {
 	struct rb_root range_tree;
 	void *selector_work_buf;	/* Scratch buffer used for selector */
+
+	struct hwspinlock *hwlock;
 };
 
 struct regcache_ops {
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
 /*
  * Register map access API - W1 (1-Wire) support
  *
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
  * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
 #include <linux/regmap.h>
 #include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 
 #include "internal.h"
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index b9a779a4a739..8d516a9bfc01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/log2.h>
+#include <linux/hwspinlock.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -413,6 +414,51 @@ static unsigned int regmap_parse_64_native(const void *buf)
 }
 #endif
 
+#ifdef CONFIG_REGMAP_HWSPINLOCK
+static void regmap_lock_hwlock(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+				    &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+	struct regmap *map = __map;
+
+	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+#endif
+
 static void regmap_lock_mutex(void *__map)
 {
 	struct regmap *map = __map;
@@ -627,6 +673,34 @@ struct regmap *__regmap_init(struct device *dev,
 		map->lock = config->lock;
 		map->unlock = config->unlock;
 		map->lock_arg = config->lock_arg;
+	} else if (config->hwlock_id) {
+#ifdef CONFIG_REGMAP_HWSPINLOCK
+		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+		if (!map->hwlock) {
+			ret = -ENXIO;
+			goto err_map;
+		}
+
+		switch (config->hwlock_mode) {
+		case HWLOCK_IRQSTATE:
+			map->lock = regmap_lock_hwlock_irqsave;
+			map->unlock = regmap_unlock_hwlock_irqrestore;
+			break;
+		case HWLOCK_IRQ:
+			map->lock = regmap_lock_hwlock_irq;
+			map->unlock = regmap_unlock_hwlock_irq;
+			break;
+		default:
+			map->lock = regmap_lock_hwlock;
+			map->unlock = regmap_unlock_hwlock;
+			break;
+		}
+
+		map->lock_arg = map;
+#else
+		ret = -EINVAL;
+		goto err_map;
+#endif
 	} else {
 		if ((bus && bus->fast_io) || config->fast_io) {
@@ -729,7 +803,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_write = regmap_format_2_6_write;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 
@@ -739,7 +813,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_write = regmap_format_4_12_write;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 
@@ -749,7 +823,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_write = regmap_format_7_9_write;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 
@@ -759,7 +833,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_write = regmap_format_10_14_write;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 
@@ -779,13 +853,13 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_reg = regmap_format_16_native;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 
 	case 24:
 		if (reg_endian != REGMAP_ENDIAN_BIG)
-			goto err_map;
+			goto err_hwlock;
 		map->format.format_reg = regmap_format_24;
 		break;
 
@@ -801,7 +875,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_reg = regmap_format_32_native;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 
@@ -818,13 +892,13 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.format_reg = regmap_format_64_native;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 #endif
 
 	default:
-		goto err_map;
+		goto err_hwlock;
 	}
 
 	if (val_endian == REGMAP_ENDIAN_NATIVE)
@@ -853,12 +927,12 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_16_native;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 	case 24:
 		if (val_endian != REGMAP_ENDIAN_BIG)
-			goto err_map;
+			goto err_hwlock;
 		map->format.format_val = regmap_format_24;
 		map->format.parse_val = regmap_parse_24;
 		break;
@@ -879,7 +953,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_32_native;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
#ifdef CONFIG_64BIT
@@ -900,7 +974,7 @@ struct regmap *__regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_64_native;
 			break;
 		default:
-			goto err_map;
+			goto err_hwlock;
 		}
 		break;
 #endif
@@ -909,18 +983,18 @@
 	if (map->format.format_write) {
 		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
 		    (val_endian != REGMAP_ENDIAN_BIG))
-			goto err_map;
+			goto err_hwlock;
 		map->use_single_write = true;
 	}
 
 	if (!map->format.format_write &&
 	    !(map->format.format_reg && map->format.format_val))
-		goto err_map;
+		goto err_hwlock;
 
 	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
 	if (map->work_buf == NULL) {
 		ret = -ENOMEM;
-		goto err_map;
+		goto err_hwlock;
 	}
 
 	if (map->format.format_write) {
@@ -1041,6 +1115,9 @@ err_regcache:
 err_range:
 	regmap_range_exit(map);
 	kfree(map->work_buf);
+err_hwlock:
+	if (IS_ENABLED(CONFIG_REGMAP_HWSPINLOCK) && map->hwlock)
+		hwspin_lock_free(map->hwlock);
 err_map:
 	kfree(map);
 err:
@@ -1228,6 +1305,8 @@ void regmap_exit(struct regmap *map)
 		kfree(async->work_buf);
 		kfree(async);
 	}
+	if (IS_ENABLED(CONFIG_REGMAP_HWSPINLOCK) && map->hwlock)
+		hwspin_lock_free(map->hwlock);
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index d6ec1c546f5b..d936fcf9f1fb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -105,7 +105,7 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
-static struct attribute_group topology_attr_group = {
+static const struct attribute_group topology_attr_group = {
 	.attrs = default_attrs,
 	.name = "topology"
 };
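
To use the new hardware-spinlock support, a driver fills in the hwlock_id and hwlock_mode fields of its regmap_config. A hedged sketch, assuming an MMIO register block shared with a coprocessor; the two hwlock fields come from this diff, everything else is generic boilerplate. Note that in the code above a hwlock_id of 0 is indistinguishable from "no hwlock" (the path is gated on "else if (config->hwlock_id)"), so this sketch uses lock 1:

#include <linux/regmap.h>
#include <linux/hwspinlock.h>

static const struct regmap_config my_shared_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	/* Serialize against the coprocessor with hwspinlock 1,
	 * taken with interrupts disabled and state saved. */
	.hwlock_id	= 1,
	.hwlock_mode	= HWLOCK_IRQSTATE,
};

/* map = devm_regmap_init_mmio(dev, base, &my_shared_regmap_config); */
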