Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/arch_topology.c    |  86
-rw-r--r-- | drivers/base/base.h             |   5
-rw-r--r-- | drivers/base/bus.c              |   2
-rw-r--r-- | drivers/base/core.c             | 141
-rw-r--r-- | drivers/base/dd.c               |  32
-rw-r--r-- | drivers/base/dma-coherent.c     | 164
-rw-r--r-- | drivers/base/dma-mapping.c      |   2
-rw-r--r-- | drivers/base/firmware_class.c   |  62
-rw-r--r-- | drivers/base/power/domain.c     | 259
-rw-r--r-- | drivers/base/power/main.c       | 103
-rw-r--r-- | drivers/base/power/opp/of.c     |  37
-rw-r--r-- | drivers/base/power/wakeup.c     |  10
-rw-r--r-- | drivers/base/regmap/regmap-w1.c |   4
-rw-r--r-- | drivers/base/topology.c         |   2
14 files changed, 697 insertions, 212 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index d1c33a85059e..41be9ff7d70a 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -41,8 +41,7 @@ static ssize_t cpu_capacity_show(struct device *dev, { struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%lu\n", - topology_get_cpu_scale(NULL, cpu->dev.id)); + return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id)); } static ssize_t cpu_capacity_store(struct device *dev, @@ -96,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl); static u32 capacity_scale; static u32 *raw_capacity; -static bool cap_parsing_failed; + +static int __init free_raw_capacity(void) +{ + kfree(raw_capacity); + raw_capacity = NULL; + + return 0; +} void topology_normalize_cpu_scale(void) { u64 capacity; int cpu; - if (!raw_capacity || cap_parsing_failed) + if (!raw_capacity) return; pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); @@ -120,16 +126,16 @@ void topology_normalize_cpu_scale(void) mutex_unlock(&cpu_scale_mutex); } -int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) +bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) { - int ret = 1; + static bool cap_parsing_failed; + int ret; u32 cpu_capacity; if (cap_parsing_failed) - return !ret; + return false; - ret = of_property_read_u32(cpu_node, - "capacity-dmips-mhz", + ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", &cpu_capacity); if (!ret) { if (!raw_capacity) { @@ -139,21 +145,21 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) if (!raw_capacity) { pr_err("cpu_capacity: failed to allocate memory for raw capacities\n"); cap_parsing_failed = true; - return 0; + return false; } } capacity_scale = max(cpu_capacity, capacity_scale); raw_capacity[cpu] = cpu_capacity; - pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n", - cpu_node->full_name, raw_capacity[cpu]); + pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n", + cpu_node, raw_capacity[cpu]); } else { if (raw_capacity) { - pr_err("cpu_capacity: missing %s raw capacity\n", - cpu_node->full_name); + pr_err("cpu_capacity: missing %pOF raw capacity\n", + cpu_node); pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); } cap_parsing_failed = true; - kfree(raw_capacity); + free_raw_capacity(); } return !ret; @@ -161,7 +167,6 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) #ifdef CONFIG_CPU_FREQ static cpumask_var_t cpus_to_visit; -static bool cap_parsing_done; static void parsing_done_workfn(struct work_struct *work); static DECLARE_WORK(parsing_done_work, parsing_done_workfn); @@ -173,30 +178,31 @@ init_cpu_capacity_callback(struct notifier_block *nb, struct cpufreq_policy *policy = data; int cpu; - if (cap_parsing_failed || cap_parsing_done) + if (!raw_capacity) return 0; - switch (val) { - case CPUFREQ_NOTIFY: - pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", - cpumask_pr_args(policy->related_cpus), - cpumask_pr_args(cpus_to_visit)); - cpumask_andnot(cpus_to_visit, - cpus_to_visit, - policy->related_cpus); - for_each_cpu(cpu, policy->related_cpus) { - raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) * - policy->cpuinfo.max_freq / 1000UL; - capacity_scale = max(raw_capacity[cpu], capacity_scale); - } - if (cpumask_empty(cpus_to_visit)) { - topology_normalize_cpu_scale(); - kfree(raw_capacity); - pr_debug("cpu_capacity: parsing done\n"); - cap_parsing_done = 
true; - schedule_work(&parsing_done_work); - } + if (val != CPUFREQ_NOTIFY) + return 0; + + pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", + cpumask_pr_args(policy->related_cpus), + cpumask_pr_args(cpus_to_visit)); + + cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); + + for_each_cpu(cpu, policy->related_cpus) { + raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) * + policy->cpuinfo.max_freq / 1000UL; + capacity_scale = max(raw_capacity[cpu], capacity_scale); + } + + if (cpumask_empty(cpus_to_visit)) { + topology_normalize_cpu_scale(); + free_raw_capacity(); + pr_debug("cpu_capacity: parsing done\n"); + schedule_work(&parsing_done_work); } + return 0; } @@ -233,11 +239,5 @@ static void parsing_done_workfn(struct work_struct *work) } #else -static int __init free_raw_capacity(void) -{ - kfree(raw_capacity); - - return 0; -} core_initcall(free_raw_capacity); #endif diff --git a/drivers/base/base.h b/drivers/base/base.h index e19b1008e5fb..539432a14b5c 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv, extern void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); -extern int device_add_groups(struct device *dev, - const struct attribute_group **groups); -extern void device_remove_groups(struct device *dev, - const struct attribute_group **groups); - extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index e162c9a789ba..22a64fd3309b 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv) out_unregister: kobject_put(&priv->kobj); - kfree(drv->p); + /* drv->p is freed in driver_release() */ drv->p = NULL; out_put_bus: bus_put(bus); diff --git a/drivers/base/core.c b/drivers/base/core.c index 755451f684bc..12ebd055724c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1023,12 +1023,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups) { return sysfs_create_groups(&dev->kobj, groups); } +EXPORT_SYMBOL_GPL(device_add_groups); void device_remove_groups(struct device *dev, const struct attribute_group **groups) { sysfs_remove_groups(&dev->kobj, groups); } +EXPORT_SYMBOL_GPL(device_remove_groups); + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +static int devm_attr_group_match(struct device *dev, void *res, void *data) +{ + return ((union device_attr_group_devres *)res)->group == data; +} + +static void devm_attr_group_remove(struct device *dev, void *res) +{ + union device_attr_group_devres *devres = res; + const struct attribute_group *group = devres->group; + + dev_dbg(dev, "%s: removing group %p\n", __func__, group); + sysfs_remove_group(&dev->kobj, group); +} + +static void devm_attr_groups_remove(struct device *dev, void *res) +{ + union device_attr_group_devres *devres = res; + const struct attribute_group **groups = devres->groups; + + dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); + sysfs_remove_groups(&dev->kobj, groups); +} + +/** + * devm_device_add_group - given a device, create a managed attribute group + * @dev: The device to create the group for + * @grp: The attribute group to create + * + * This function creates a group for the first time. 
It will explicitly + * warn and error if any of the attribute files being created already exist. + * + * Returns 0 on success or error code on failure. + */ +int devm_device_add_group(struct device *dev, const struct attribute_group *grp) +{ + union device_attr_group_devres *devres; + int error; + + devres = devres_alloc(devm_attr_group_remove, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + error = sysfs_create_group(&dev->kobj, grp); + if (error) { + devres_free(devres); + return error; + } + + devres->group = grp; + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_device_add_group); + +/** + * devm_device_remove_group: remove a managed group from a device + * @dev: device to remove the group from + * @grp: group to remove + * + * This function removes a group of attributes from a device. The attributes + * previously have to have been created for this group, otherwise it will fail. + */ +void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp) +{ + WARN_ON(devres_release(dev, devm_attr_group_remove, + devm_attr_group_match, + /* cast away const */ (void *)grp)); +} +EXPORT_SYMBOL_GPL(devm_device_remove_group); + +/** + * devm_device_add_groups - create a bunch of managed attribute groups + * @dev: The device to create the group for + * @groups: The attribute groups to create, NULL terminated + * + * This function creates a bunch of managed attribute groups. If an error + * occurs when creating a group, all previously created groups will be + * removed, unwinding everything back to the original state when this + * function was called. It will explicitly warn and error if any of the + * attribute files being created already exist. + * + * Returns 0 on success or error code from sysfs_create_group on failure. + */ +int devm_device_add_groups(struct device *dev, + const struct attribute_group **groups) +{ + union device_attr_group_devres *devres; + int error; + + devres = devres_alloc(devm_attr_groups_remove, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + error = sysfs_create_groups(&dev->kobj, groups); + if (error) { + devres_free(devres); + return error; + } + + devres->groups = groups; + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_device_add_groups); + +/** + * devm_device_remove_groups - remove a list of managed groups + * + * @dev: The device for the groups to be removed from + * @groups: NULL terminated list of groups to be removed + * + * If groups is not NULL, remove the specified groups from the device. 
+ */ +void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups) +{ + WARN_ON(devres_release(dev, devm_attr_groups_remove, + devm_attr_group_match, + /* cast away const */ (void *)groups)); +} +EXPORT_SYMBOL_GPL(devm_device_remove_groups); static int device_add_attrs(struct device *dev) { @@ -2664,11 +2796,12 @@ void device_shutdown(void) pm_runtime_get_noresume(dev); pm_runtime_barrier(dev); - if (dev->class && dev->class->shutdown) { + if (dev->class && dev->class->shutdown_pre) { if (initcall_debug) - dev_info(dev, "shutdown\n"); - dev->class->shutdown(dev); - } else if (dev->bus && dev->bus->shutdown) { + dev_info(dev, "shutdown_pre\n"); + dev->class->shutdown_pre(dev); + } + if (dev->bus && dev->bus->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 4882f06d12df..ad44b40fe284 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -20,6 +20,7 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/init.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/wait.h> @@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex); static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static atomic_t deferred_trigger_count = ATOMIC_INIT(0); +static bool initcalls_done; /* * In some cases, like suspend to RAM or hibernation, It might be reasonable @@ -62,6 +64,26 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0); static bool defer_all_probes; /* + * For initcall_debug, show the deferred probes executed in late_initcall + * processing. + */ +static void deferred_probe_debug(struct device *dev) +{ + ktime_t calltime, delta, rettime; + unsigned long long duration; + + printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev), + task_pid_nr(current)); + calltime = ktime_get(); + bus_probe_device(dev); + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n", + dev_name(dev), duration); +} + +/* * deferred_probe_work_func() - Retry probing devices in the active list. 
*/ static void deferred_probe_work_func(struct work_struct *work) @@ -106,7 +128,10 @@ static void deferred_probe_work_func(struct work_struct *work) device_pm_unlock(); dev_dbg(dev, "Retrying from deferred list\n"); - bus_probe_device(dev); + if (initcall_debug && !initcalls_done) + deferred_probe_debug(dev); + else + bus_probe_device(dev); mutex_lock(&deferred_probe_mutex); @@ -215,6 +240,7 @@ static int deferred_probe_initcall(void) driver_deferred_probe_trigger(); /* Sort as many dependencies as possible before exiting initcalls */ flush_work(&deferred_probe_work); + initcalls_done = true; return 0; } late_initcall(deferred_probe_initcall); @@ -259,6 +285,8 @@ static void driver_bound(struct device *dev) if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_BOUND_DRIVER, dev); + + kobject_uevent(&dev->kobj, KOBJ_BIND); } static int driver_sysfs_add(struct device *dev) @@ -848,6 +876,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_UNBOUND_DRIVER, dev); + + kobject_uevent(&dev->kobj, KOBJ_UNBIND); } } diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 2ae24c28e70c..1c152aed6b82 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de { if (dev && dev->dma_mem) return dev->dma_mem; - return dma_coherent_default_memory; + return NULL; } static inline dma_addr_t dma_get_device_base(struct device *dev, @@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev, } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); -/** - * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area - * - * @dev: device from which we allocate memory - * @size: size of requested memory area - * @dma_handle: This will be filled with the correct dma handle - * @ret: This pointer will be filled with the virtual address - * to allocated area. - * - * This function should be only called from per-arch dma_alloc_coherent() - * to support allocation from per-device coherent memory pools. - * - * Returns 0 if dma_alloc_coherent should continue with allocating from - * generic memory areas, or !0 if dma_alloc_coherent should return @ret. - */ -int dma_alloc_from_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) +static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); int order = get_order(size); unsigned long flags; int pageno; int dma_memory_map; + void *ret; - if (!mem) - return 0; - - *ret = NULL; spin_lock_irqsave(&mem->spinlock, flags); if (unlikely(size > (mem->size << PAGE_SHIFT))) @@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, goto err; /* - * Memory was found in the per-device area. + * Memory was found in the coherent area. 
*/ - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); - *ret = mem->virt_base + (pageno << PAGE_SHIFT); + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + ret = mem->virt_base + (pageno << PAGE_SHIFT); dma_memory_map = (mem->flags & DMA_MEMORY_MAP); spin_unlock_irqrestore(&mem->spinlock, flags); if (dma_memory_map) - memset(*ret, 0, size); + memset(ret, 0, size); else - memset_io(*ret, 0, size); + memset_io(ret, 0, size); - return 1; + return ret; err: spin_unlock_irqrestore(&mem->spinlock, flags); + return NULL; +} + +/** + * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool + * @dev: device from which we allocate memory + * @size: size of requested memory area + * @dma_handle: This will be filled with the correct dma handle + * @ret: This pointer will be filled with the virtual address + * to allocated area. + * + * This function should be only called from per-arch dma_alloc_coherent() + * to support allocation from per-device coherent memory pools. + * + * Returns 0 if dma_alloc_coherent should continue with allocating from + * generic memory areas, or !0 if dma_alloc_coherent should return @ret. + */ +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + if (!mem) + return 0; + + *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + if (*ret) + return 1; + /* * In the case where the allocation can not be satisfied from the * per-device area, try to fall back to generic memory if the @@ -225,25 +235,20 @@ err: */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } -EXPORT_SYMBOL(dma_alloc_from_coherent); +EXPORT_SYMBOL(dma_alloc_from_dev_coherent); -/** - * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool - * @dev: device from which the memory was allocated - * @order: the order of pages allocated - * @vaddr: virtual address of allocated pages - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, releases that memory. - * - * Returns 1 if we correctly released the memory, or 0 if - * dma_release_coherent() should proceed with releasing memory from - * generic pools. 
- */ -int dma_release_from_coherent(struct device *dev, int order, void *vaddr) +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dma_coherent_default_memory, size, + dma_handle); +} +static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) +{ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; @@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) } return 0; } -EXPORT_SYMBOL(dma_release_from_coherent); /** - * dma_mmap_from_coherent() - try to mmap the memory allocated from - * per-device coherent memory pool to userspace + * dma_release_from_dev_coherent() - free memory to device coherent memory pool * @dev: device from which the memory was allocated - * @vma: vm_area for the userspace memory - * @vaddr: cpu address returned by dma_alloc_from_coherent - * @size: size of the memory buffer allocated by dma_alloc_from_coherent - * @ret: result from remap_pfn_range() + * @order: the order of pages allocated + * @vaddr: virtual address of allocated pages * * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, maps that memory to the provided vma. + * coherent memory pool and if so, releases that memory. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. + * Returns 1 if we correctly released the memory, or 0 if the caller should + * proceed with releasing memory from generic pools. */ -int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, - void *vaddr, size_t size, int *ret) +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + return __dma_release_from_coherent(mem, order, vaddr); +} +EXPORT_SYMBOL(dma_release_from_dev_coherent); + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + +static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) +{ if (mem && vaddr >= mem->virt_base && vaddr + size <= (mem->virt_base + (mem->size << PAGE_SHIFT))) { unsigned long off = vma->vm_pgoff; @@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, } return 0; } -EXPORT_SYMBOL(dma_mmap_from_coherent); + +/** + * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool + * @dev: device from which the memory was allocated + * @vma: vm_area for the userspace memory + * @vaddr: cpu address returned by dma_alloc_from_dev_coherent + * @size: size of the memory buffer allocated + * @ret: result from remap_pfn_range() + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, maps that memory to the provided vma. + * + * Returns 1 if we correctly mapped the memory, or 0 if the caller should + * proceed with mapping memory from generic pools. 
+ */ +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); +} +EXPORT_SYMBOL(dma_mmap_from_dev_coherent); + +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, + size_t size, int *ret) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, + vaddr, size, ret); +} /* * Support for reserved memory regions defined in device tree diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 5096755d185e..b555ff9dd8fc 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b9f907eedbf7..a5fb884a136d 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -7,6 +7,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/capability.h> #include <linux/device.h> #include <linux/module.h> @@ -30,7 +32,6 @@ #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> -#include <linux/swait.h> #include <generated/utsrelease.h> @@ -112,13 +113,13 @@ static inline long firmware_loading_timeout(void) * state of the firmware loading. */ struct fw_state { - struct swait_queue_head wq; + struct completion completion; enum fw_status status; }; static void fw_state_init(struct fw_state *fw_st) { - init_swait_queue_head(&fw_st->wq); + init_completion(&fw_st->completion); fw_st->status = FW_STATUS_UNKNOWN; } @@ -131,9 +132,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) { long ret; - ret = swait_event_interruptible_timeout(fw_st->wq, - __fw_state_is_done(READ_ONCE(fw_st->status)), - timeout); + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) return -ENOENT; if (!ret) @@ -148,35 +147,34 @@ static void __fw_state_set(struct fw_state *fw_st, WRITE_ONCE(fw_st->status, status); if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) - swake_up(&fw_st->wq); + complete_all(&fw_st->completion); } #define fw_state_start(fw_st) \ __fw_state_set(fw_st, FW_STATUS_LOADING) #define fw_state_done(fw_st) \ __fw_state_set(fw_st, FW_STATUS_DONE) +#define fw_state_aborted(fw_st) \ + __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_wait(fw_st) \ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) -#ifndef CONFIG_FW_LOADER_USER_HELPER - -#define fw_state_is_aborted(fw_st) false - -#else /* CONFIG_FW_LOADER_USER_HELPER */ - static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) { return fw_st->status == status; } +#define fw_state_is_aborted(fw_st) \ + __fw_state_check(fw_st, FW_STATUS_ABORTED) + +#ifdef CONFIG_FW_LOADER_USER_HELPER + #define fw_state_aborted(fw_st) \ __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_is_done(fw_st) \ __fw_state_check(fw_st, FW_STATUS_DONE) #define fw_state_is_loading(fw_st) \ __fw_state_check(fw_st, FW_STATUS_LOADING) -#define fw_state_is_aborted(fw_st) \ - 
__fw_state_check(fw_st, FW_STATUS_ABORTED) #define fw_state_wait_timeout(fw_st, timeout) \ __fw_state_wait_common(fw_st, timeout) @@ -335,6 +333,7 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name) return NULL; } +/* Returns 1 for batching firmware requests with the same name */ static int fw_lookup_and_allocate_buf(const char *fw_name, struct firmware_cache *fwc, struct firmware_buf **buf, void *dbuf, @@ -348,6 +347,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name, kref_get(&tmp->ref); spin_unlock(&fwc->lock); *buf = tmp; + pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n"); return 1; } tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size); @@ -1089,9 +1089,12 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, mutex_unlock(&fw_lock); } - if (fw_state_is_aborted(&buf->fw_st)) - retval = -EAGAIN; - else if (buf->is_paged_buf && !buf->data) + if (fw_state_is_aborted(&buf->fw_st)) { + if (retval == -ERESTARTSYS) + retval = -EINTR; + else + retval = -EAGAIN; + } else if (buf->is_paged_buf && !buf->data) retval = -ENOMEM; device_del(f_dev); @@ -1200,6 +1203,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, return 1; /* need to load */ } +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct firmware_buf and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct firmware_buf *buf; + + /* Loaded directly? */ + if (!fw || !fw->priv) + return; + + buf = fw->priv; + if (!fw_state_is_aborted(&buf->fw_st)) + fw_state_aborted(&buf->fw_st); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1243,6 +1268,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, out: if (ret < 0) { + fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 3b8210ebb50e..e8ca5e2cf1e5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } +#ifdef CONFIG_DEBUG_FS +static void genpd_update_accounting(struct generic_pm_domain *genpd) +{ + ktime_t delta, now; + + now = ktime_get(); + delta = ktime_sub(now, genpd->accounting_time); + + /* + * If genpd->status is active, it means we are just + * out of off and so update the idle time and vice + * versa. 
+ */ + if (genpd->status == GPD_STATE_ACTIVE) { + int state_idx = genpd->state_idx; + + genpd->states[state_idx].idle_time = + ktime_add(genpd->states[state_idx].idle_time, delta); + } else { + genpd->on_time = ktime_add(genpd->on_time, delta); + } + + genpd->accounting_time = now; +} +#else +static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} +#endif + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, } genpd->status = GPD_STATE_POWER_OFF; + genpd_update_accounting(genpd); list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) goto err; genpd->status = GPD_STATE_ACTIVE; + genpd_update_accounting(genpd); + return 0; err: @@ -1222,8 +1253,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, spin_unlock_irq(&dev->power.lock); - dev_pm_domain_set(dev, &genpd->domain); - return gpd_data; err_free: @@ -1237,8 +1266,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, static void genpd_free_dev_data(struct device *dev, struct generic_pm_domain_data *gpd_data) { - dev_pm_domain_set(dev, NULL); - spin_lock_irq(&dev->power.lock); dev->power.subsys_data->domain_data = NULL; @@ -1275,6 +1302,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (ret) goto out; + dev_pm_domain_set(dev, &genpd->domain); + genpd->device_count++; genpd->max_off_time_changed = true; @@ -1336,6 +1365,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, if (genpd->detach_dev) genpd->detach_dev(genpd, dev); + dev_pm_domain_set(dev, NULL); + list_del_init(&pdd->list_node); genpd_unlock(genpd); @@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->provider = NULL; genpd->has_provider = false; + genpd->accounting_time = ktime_get(); genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; @@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, mutex_lock(&of_genpd_mutex); list_add(&cp->link, &of_genpd_providers); mutex_unlock(&of_genpd_mutex); - pr_debug("Added domain provider from %s\n", np->full_name); + pr_debug("Added domain provider from %pOF\n", np); return 0; } @@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } @@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn, ret = genpd_parse_state(&st[i++], np); if (ret) { pr_err - ("Parsing idle state node %s failed with err %d\n", - np->full_name, ret); + ("Parsing idle state node %pOF failed with err %d\n", + np, ret); of_node_put(np); kfree(st); return ret; @@ -2327,7 
+2359,7 @@ exit: return 0; } -static int pm_genpd_summary_show(struct seq_file *s, void *data) +static int genpd_summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int pm_genpd_summary_open(struct inode *inode, struct file *file) +static int genpd_status_show(struct seq_file *s, void *data) { - return single_open(file, pm_genpd_summary_show, NULL); + static const char * const status_lookup[] = { + [GPD_STATE_ACTIVE] = "on", + [GPD_STATE_POWER_OFF] = "off" + }; + + struct generic_pm_domain *genpd = s->private; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + + if (genpd->status == GPD_STATE_POWER_OFF) + seq_printf(s, "%s-%u\n", status_lookup[genpd->status], + genpd->state_idx); + else + seq_printf(s, "%s\n", status_lookup[genpd->status]); +exit: + genpd_unlock(genpd); + return ret; } -static const struct file_operations pm_genpd_summary_fops = { - .open = pm_genpd_summary_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +static int genpd_sub_domains_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct gpd_link *link; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(link, &genpd->master_links, master_node) + seq_printf(s, "%s\n", link->slave->name); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_idle_states_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + seq_puts(s, "State Time Spent(ms)\n"); + + for (i = 0; i < genpd->state_count; i++) { + ktime_t delta = 0; + s64 msecs; + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + msecs = ktime_to_ms( + ktime_add(genpd->states[i].idle_time, delta)); + seq_printf(s, "S%-13i %lld\n", i, msecs); + } + + genpd_unlock(genpd); + return ret; +} + +static int genpd_active_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (genpd->status == GPD_STATE_ACTIVE) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + seq_printf(s, "%lld ms\n", ktime_to_ms( + ktime_add(genpd->on_time, delta))); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_total_idle_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0, total = 0; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + for (i = 0; i < genpd->state_count; i++) { + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + total = ktime_add(total, genpd->states[i].idle_time); + } + total = ktime_add(total, delta); + + seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + + genpd_unlock(genpd); + return ret; +} + + +static int genpd_devices_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct pm_domain_data *pm_data; + const char 
*kobj_path; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? + GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "%s\n", kobj_path); + kfree(kobj_path); + } + + genpd_unlock(genpd); + return ret; +} + +#define define_genpd_open_function(name) \ +static int genpd_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, genpd_##name##_show, inode->i_private); \ +} + +define_genpd_open_function(summary); +define_genpd_open_function(status); +define_genpd_open_function(sub_domains); +define_genpd_open_function(idle_states); +define_genpd_open_function(active_time); +define_genpd_open_function(total_idle_time); +define_genpd_open_function(devices); + +#define define_genpd_debugfs_fops(name) \ +static const struct file_operations genpd_##name##_fops = { \ + .open = genpd_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +define_genpd_debugfs_fops(summary); +define_genpd_debugfs_fops(status); +define_genpd_debugfs_fops(sub_domains); +define_genpd_debugfs_fops(idle_states); +define_genpd_debugfs_fops(active_time); +define_genpd_debugfs_fops(total_idle_time); +define_genpd_debugfs_fops(devices); static int __init pm_genpd_debug_init(void) { struct dentry *d; + struct generic_pm_domain *genpd; pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); @@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); + pm_genpd_debugfs_dir, NULL, &genpd_summary_fops); if (!d) return -ENOMEM; + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir); + if (!d) + return -ENOMEM; + + debugfs_create_file("current_state", 0444, + d, genpd, &genpd_status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &genpd_sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &genpd_idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &genpd_active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &genpd_total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, &genpd_devices_fops); + } + return 0; } late_initcall(pm_genpd_debug_init); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index c99f8730de82..ea1732ed7a9d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, dev_name(dev), pm_verb(state.event), info, error); } -#ifdef CONFIG_PM_DEBUG -static void dpm_show_time(ktime_t starttime, pm_message_t state, +static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, const char *info) { ktime_t calltime; @@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, usecs = usecs64; if (usecs == 0) usecs = 1; - pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", - info ?: "", info ? " " : "", pm_verb(state.event), - usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); + + pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + error ? 
"aborted" : "complete", + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } -#else -static inline void dpm_show_time(ktime_t starttime, pm_message_t state, - const char *info) {} -#endif /* CONFIG_PM_DEBUG */ static int dpm_run_callback(pm_callback_t cb, struct device *dev, pm_message_t state, const char *info) @@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie) put_device(dev); } -/** - * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Call the "noirq" resume handlers for all devices in dpm_noirq_list and - * enable device drivers to receive interrupts. - */ -void dpm_resume_noirq(pm_message_t state) +void dpm_noirq_resume_devices(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); @@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "noirq"); + dpm_show_time(starttime, state, 0, "noirq"); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +void dpm_noirq_end(void) +{ resume_device_irqs(); device_wakeup_disarm_wake_irqs(); cpuidle_resume(); - trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +/** + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and + * allow device drivers' interrupt handlers to be called. + */ +void dpm_resume_noirq(pm_message_t state) +{ + dpm_noirq_resume_devices(state); + dpm_noirq_end(); } /** @@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, "early"); + dpm_show_time(starttime, state, 0, "early"); trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); } @@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state) } mutex_unlock(&dpm_list_mtx); async_synchronize_full(); - dpm_show_time(starttime, state, NULL); + dpm_show_time(starttime, state, 0, NULL); cpufreq_resume(); trace_suspend_resume(TPS("dpm_resume"), state.event, false); @@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (async_error) goto Complete; + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + if (dev->power.syscore || dev->power.direct_complete) goto Complete; @@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev) return __device_suspend_noirq(dev, pm_transition, false); } -/** - * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. - * @state: PM transition of the system being carried out. - * - * Prevent device drivers from receiving interrupts and call the "noirq" suspend - * handlers for all non-sysdev devices. 
- */ -int dpm_suspend_noirq(pm_message_t state) +void dpm_noirq_begin(void) +{ + cpuidle_pause(); + device_wakeup_arm_wake_irqs(); + suspend_device_irqs(); +} + +int dpm_noirq_suspend_devices(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); - cpuidle_pause(); - device_wakeup_arm_wake_irqs(); - suspend_device_irqs(); mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -1208,15 +1217,32 @@ int dpm_suspend_noirq(pm_message_t state) if (error) { suspend_stats.failed_suspend_noirq++; dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); - dpm_resume_noirq(resume_event(state)); - } else { - dpm_show_time(starttime, state, "noirq"); } + dpm_show_time(starttime, state, error, "noirq"); trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); return error; } /** + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Prevent device drivers' interrupt handlers from being called and invoke + * "noirq" suspend callbacks for all non-sysdev devices. + */ +int dpm_suspend_noirq(pm_message_t state) +{ + int ret; + + dpm_noirq_begin(); + ret = dpm_noirq_suspend_devices(state); + if (ret) + dpm_resume_noirq(resume_event(state)); + + return ret; +} + +/** * device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. @@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state) suspend_stats.failed_suspend_late++; dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - } else { - dpm_show_time(starttime, state, "late"); } + dpm_show_time(starttime, state, error, "late"); trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); return error; } @@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state) if (error) { suspend_stats.failed_suspend++; dpm_save_failed_step(SUSPEND_SUSPEND); - } else - dpm_show_time(starttime, state, NULL); + } + dpm_show_time(starttime, state, error, NULL); trace_suspend_resume(TPS("dpm_suspend"), state.event, false); return error; } diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 57eec1ca0569..0b718886479b 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); -/* Returns opp descriptor node for a device, caller must do of_node_put() */ -struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +/* Returns opp descriptor node for a device node, caller must + * do of_node_put() */ +static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) { /* * There should be only ONE phandle present in "operating-points-v2" * property. 
*/ - return of_parse_phandle(dev->of_node, "operating-points-v2", 0); + return of_parse_phandle(np, "operating-points-v2", 0); +} + +/* Returns opp descriptor node for a device, caller must do of_node_put() */ +struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) +{ + return _opp_of_get_opp_desc_node(dev->of_node); } EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); @@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) ret = dev_pm_opp_of_add_table(cpu_dev); if (ret) { - pr_err("%s: couldn't find opp table for cpu:%d, %d\n", - __func__, cpu, ret); + /* + * OPP may get registered dynamically, don't print error + * message here. + */ + pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); /* Free all other OPPs */ dev_pm_opp_of_cpumask_remove_table(cpumask); @@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - struct device_node *np, *tmp_np; - struct device *tcpu_dev; + struct device_node *np, *tmp_np, *cpu_np; int cpu, ret = 0; /* Get OPP descriptor node */ @@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, if (cpu == cpu_dev->id) continue; - tcpu_dev = get_cpu_device(cpu); - if (!tcpu_dev) { - dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + cpu_np = of_get_cpu_node(cpu, NULL); + if (!cpu_np) { + dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu); - ret = -ENODEV; + ret = -ENOENT; goto put_cpu_node; } /* Get OPP descriptor node */ - tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev); + tmp_np = _opp_of_get_opp_desc_node(cpu_np); if (!tmp_np) { - dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", - __func__); + pr_err("%pOF: Couldn't find opp node\n", cpu_np); ret = -ENOENT; goto put_cpu_node; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..cdd6f256da59 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; + dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { - if (wakeup_sysfs_add(dev)) - return; + int ret = wakeup_sysfs_add(dev); + + if (ret) + dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } - dev->power.can_wakeup = capable; } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); @@ -863,7 +865,7 @@ bool pm_wakeup_pending(void) void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); - freeze_wake(); + s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c index 5f04e7bf063e..e6c64b0be5b2 100644 --- a/drivers/base/regmap/regmap-w1.c +++ b/drivers/base/regmap/regmap-w1.c @@ -1,7 +1,7 @@ /* * Register map access API - W1 (1-Wire) support * - * Copyright (C) 2017 OAO Radioavionica + * Copyright (c) 2017 Radioavionica Corporation * Author: Alex A. 
Mihaylov <minimumlaw@rambler.ru> * * This program is free software; you can redistribute it and/or modify @@ -11,7 +11,7 @@ #include <linux/regmap.h> #include <linux/module.h> -#include "../../w1/w1.h" +#include <linux/w1.h> #include "internal.h" diff --git a/drivers/base/topology.c b/drivers/base/topology.c index d6ec1c546f5b..d936fcf9f1fb 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -105,7 +105,7 @@ static struct attribute *default_attrs[] = { NULL }; -static struct attribute_group topology_attr_group = { +static const struct attribute_group topology_attr_group = { .attrs = default_attrs, .name = "topology" }; |
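
The arch_topology.c hunks above make topology_parse_cpu_capacity() return bool and move the failure bookkeeping (including freeing raw_capacity) inside the helper. A minimal sketch of how an architecture's DT parsing loop might consume the new return value follows; the function name and loop structure are hypothetical, only the topology_* calls come from the code above.

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/of.h>

/* Hypothetical arch-side loop: parse capacity-dmips-mhz for every CPU. */
static void __init example_parse_dt_cpu_capacity(void)
{
	struct device_node *cn;
	bool all_ok = true;
	int cpu;

	for_each_possible_cpu(cpu) {
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn)
			continue;

		/* false means the property was missing or allocation failed */
		if (!topology_parse_cpu_capacity(cn, cpu))
			all_ok = false;

		of_node_put(cn);
	}

	/* Only normalize if every CPU provided a raw capacity value. */
	if (all_ok)
		topology_normalize_cpu_scale();
}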
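core.c above exports device_add_groups()/device_remove_groups() and adds devres-managed variants. With the managed form a driver does not need to remove the group on unbind; devres tears it down when the device is released from the driver. A hedged sketch of a caller (driver, attribute, and probe function names are made up for illustration):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute for illustration. */
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR_RO(status);

static struct attribute *example_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static int example_probe(struct platform_device *pdev)
{
	/*
	 * The group is removed automatically by devres when the device is
	 * unbound, so no matching call is needed in the remove handler.
	 */
	return devm_device_add_group(&pdev->dev, &example_group);
}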
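device_shutdown() now calls a renamed class callback, ->shutdown_pre(), and then falls through to the bus ->shutdown() instead of treating the class hook as a replacement for it. A sketch of a class using the hook; the class name and callback body are hypothetical, only the field name comes from the change above.

#include <linux/device.h>
#include <linux/module.h>

/* Runs before the bus/driver ->shutdown(), not instead of it. */
static void example_shutdown_pre(struct device *dev)
{
	dev_info(dev, "quiescing before driver shutdown\n");
}

static struct class example_class = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.shutdown_pre	= example_shutdown_pre,
};

Registration is unchanged (class_register(&example_class)); only the callback name and ordering differ from the old ->shutdown() class hook.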
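dma-coherent.c above splits the per-device pool from the global default pool: dma_alloc_from_dev_coherent() and friends now only look at dev->dma_mem, while the new *_from_global_coherent() helpers handle the default pool explicitly. The sketch below shows the call pattern an architecture's allocator is expected to use; the arch function itself is hypothetical.

#include <linux/dma-mapping.h>

/* Hedged sketch of an arch allocation path using the renamed helper. */
static void *arch_dma_alloc_example(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	/*
	 * Nonzero means the request was handled by the per-device coherent
	 * pool (vaddr may still be NULL for a DMA_MEMORY_EXCLUSIVE pool
	 * that could not satisfy the allocation).
	 */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	/* Otherwise fall back to the generic allocator (not shown). */
	return NULL;
}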
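firmware_class.c replaces the swait queue in struct fw_state with a struct completion, waited on with wait_for_completion_killable_timeout() and signalled with complete_all() once the load finishes or is aborted. A standalone sketch of that waiter/completer pattern with simplified state tracking; the struct and function names are not from the kernel.

#include <linux/completion.h>
#include <linux/errno.h>

struct load_state {
	struct completion done;
	bool aborted;
};

static void load_state_init(struct load_state *st)
{
	init_completion(&st->done);
	st->aborted = false;
}

/*
 * Waiter: returns 0 on success, -ENOENT if the load was aborted,
 * -ETIMEDOUT on timeout, or -ERESTARTSYS if the waiting task was killed.
 */
static int wait_for_load(struct load_state *st, long timeout)
{
	long ret;

	ret = wait_for_completion_killable_timeout(&st->done, timeout);
	if (ret != 0 && st->aborted)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}

/* Completer: wakes every waiter, whether the load succeeded or was aborted. */
static void finish_load(struct load_state *st, bool aborted)
{
	st->aborted = aborted;
	complete_all(&st->done);
}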
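power/main.c above splits dpm_suspend_noirq()/dpm_resume_noirq() into dpm_noirq_begin() + dpm_noirq_suspend_devices() and dpm_noirq_resume_devices() + dpm_noirq_end(), so platform code can insert its own steps between disabling device interrupts and suspending the devices. A hypothetical caller of the split helpers, assuming they are declared in a header visible to the platform code:

#include <linux/pm.h>
#include <linux/suspend.h>

/* Hypothetical platform freeze path using the split "noirq" helpers. */
static int example_platform_freeze_noirq(void)
{
	int error;

	dpm_noirq_begin();	/* cpuidle paused, wake IRQs armed, device IRQs off */

	/* Platform-specific work could be slotted in here. */

	error = dpm_noirq_suspend_devices(PMSG_FREEZE);
	if (error)
		dpm_resume_noirq(PMSG_RECOVER);	/* unwind as dpm_suspend_noirq() does */

	return error;
}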