diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-25 13:47:41 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-25 13:47:41 -0800 |
| commit | 1e2af254ef130e37d9fb3cb1bc9bfbf6ea184b4a (patch) | |
| tree | da653d3e38f54ed497d3fae7d7f223fdbedb4ac8 /drivers | |
| parent | b271b2127e6654a72dc1685f0825fe1cc2f36939 (diff) | |
| parent | a465d38fa3dce6a0dc2d5814cb3aa7b0d2982c6b (diff) | |
| download | linux-1e2af254ef130e37d9fb3cb1bc9bfbf6ea184b4a.tar.bz2 | |
Merge tag 'pm-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
"These add sysadmin documentation for cpuidle, extend the cpuidle
subsystem somewhat, improve the handling of performance states in the
generic power domains (genpd) and operating performance points (OPP)
frameworks, add a new cpufreq driver for Qualcomm SoCs, update some
other cpufreq drivers, switch over the runtime PM framework to using
high-res timers for device autosuspend, fix a problem with
suspend-to-idle on ACPI-based platforms, add system-wide suspend and
resume handling to the devfreq framework, do some janitorial cleanups
all over and update some utilities.
Specifics:
- Add sysadmin documentation for cpuidle (Rafael Wysocki).
- Make it possible to specify a cpuidle governor on the kernel command
line, add new cpuidle state sysfs attributes for governor
evaluation, and improve the "polling" idle state handling (Rafael
Wysocki).
- Fix the handling of the "required-opps" DT property in the
operating performance points (OPP) framework, improve its
integration with the generic power domains (genpd) framework,
improve the handling of performance states in both, and clean up
the separation of idle states from performance states in genpd
(Viresh Kumar, Ulf Hansson); a consumer-side sketch follows the
diffstat below.
- Add a cpufreq driver called "qcom-hw" for Qualcomm SoCs using a
hardware engine to control CPU frequency transitions along with DT
bindings for it (Taniya Das).
- Fix an intel_pstate driver issue related to CPU offline and update
the documentation of it (Srinivas Pandruvada).
- Clean up the imx6q cpufreq driver (Anson Huang).
- Add SPDX license IDs to cpufreq schedutil governor files (Daniel
Lezcano).
- Switch the runtime PM framework over to using high-resolution
timers for device autosuspend, so that autosuspend timing can be
controlled more precisely (Vincent Guittot); see the hrtimer sketch
after this list.
- Disable non-wakeup ACPI GPEs during suspend-to-idle so that they
don't prevent the system from reaching the target low-power state
and simplify the suspend-to-idle handling on ACPI platforms without
full Low-Power S0 Idle (LPS0) support (Rafael Wysocki).
- Add system-wide suspend and resume support to the devfreq framework
(Lukasz Luba).
- Clean up the SmartReflex adaptive voltage scaling (AVS) driver and
add an SPDX license ID to it (Nishanth Menon, Uwe Kleine-König,
Thomas Meyer).
- Get rid of code duplication by using the DEFINE_SHOW_ATTRIBUTE
macro in several places, fix some DT node refcount leaks, and do
some other janitorial cleanups (Yangtao Li); a sketch of the macro
follows the commit list below.
- Update the cpupower, intel_pstate_tracer and turbostat utilities
(Abhishek Goel, Doug Smythies, Len Brown)"
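The autosuspend rework in this pull replaces the jiffies-based timer
(timer_setup()/mod_timer()) with an absolute CLOCK_MONOTONIC hrtimer
armed with a slack window, so nearby expirations can be coalesced. A
minimal sketch of that pattern, using hypothetical demo_* names rather
than the kernel's own symbols:

```c
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

/* Fires once; re-check expiry and queue the real suspend work here. */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo_timer.function = demo_timer_fn;
}

static void demo_schedule(u64 delay_ns)
{
	/* 25% slack, mirroring the patch, to gather wakeups cheaply. */
	u64 slack = delay_ns >> 2;
	ktime_t expires = ktime_add_ns(ktime_get(), delay_ns);

	hrtimer_start_range_ns(&demo_timer, expires, slack, HRTIMER_MODE_ABS);
}
```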
* tag 'pm-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (54 commits)
PM / Domains: remove define_genpd_open_function() and define_genpd_debugfs_fops()
PM-runtime: Switch autosuspend over to using hrtimers
cpufreq: qcom-hw: Add support for QCOM cpufreq HW driver
dt-bindings: cpufreq: Introduce QCOM cpufreq firmware bindings
ACPI: PM: Loop in full LPS0 mode only
ACPI: EC / PM: Disable non-wakeup GPEs for suspend-to-idle
tools/power/x86/intel_pstate_tracer: Fix non root execution for post processing a trace file
tools/power turbostat: consolidate duplicate model numbers
tools/power turbostat: fix goldmont C-state limit decoding
PM / Domains: Propagate performance state updates
PM / Domains: Factorize dev_pm_genpd_set_performance_state()
PM / Domains: Save OPP table pointer in genpd
OPP: Don't return 0 on error from of_get_required_opp_performance_state()
OPP: Add dev_pm_opp_xlate_performance_state() helper
OPP: Improve _find_table_of_opp_np()
PM / Domains: Make genpd performance states orthogonal to the idlestates
PM / sleep: convert to DEFINE_SHOW_ATTRIBUTE
cpuidle: Add 'above' and 'below' idle state metrics
PM / AVS: SmartReflex: Switch to SPDX Licence ID
PM / AVS: SmartReflex: NULL check before some freeing functions is not needed
...
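Several of the commits above ("PM / Domains: remove
define_genpd_open_function() ...", "PM / sleep: convert to
DEFINE_SHOW_ATTRIBUTE") drop open-coded single_open() boilerplate in
favor of the DEFINE_SHOW_ATTRIBUTE() helper from <linux/seq_file.h>. A
minimal sketch with a hypothetical attribute name "demo"; the macro
generates demo_open() and a demo_fops file_operations:

```c
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "example\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

/* Usage: debugfs_create_file("demo", 0444, parent, NULL, &demo_fops); */
```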
Diffstat (limited to 'drivers')
26 files changed, 1403 insertions, 440 deletions
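For context on the genpd/OPP rework in the diff below:
dev_pm_opp_set_rate() now resolves the target OPP's "required-opps"
into a genpd performance-state vote, raising the domain state before a
frequency increase and lowering it after a decrease, with rollback on
error. A hedged consumer-side sketch; the device pointer and the
800 MHz target are illustrative only:

```c
#include <linux/device.h>
#include <linux/pm_opp.h>

/*
 * For a device whose OPP table links to a power domain via
 * "required-opps", the OPP core picks the matching performance state
 * and orders the domain vote correctly around the clock change.
 */
static int demo_scale(struct device *dev)
{
	return dev_pm_opp_set_rate(dev, 800000000); /* 800 MHz */
}
```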
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d4e5610e09c5..9d66a47d32fb 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1034,6 +1034,18 @@ void acpi_ec_unblock_transactions(void) acpi_ec_start(first_ec, true); } +void acpi_ec_mark_gpe_for_wake(void) +{ + if (first_ec && !ec_no_wakeup) + acpi_mark_gpe_for_wake(NULL, first_ec->gpe); +} + +void acpi_ec_set_gpe_wake_mask(u8 action) +{ + if (first_ec && !ec_no_wakeup) + acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); +} + void acpi_ec_dispatch_gpe(void) { if (first_ec) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 530a3f675490..f59d0b9e2683 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -188,6 +188,8 @@ int acpi_ec_ecdt_probe(void); int acpi_ec_dsdt_probe(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); +void acpi_ec_mark_gpe_for_wake(void); +void acpi_ec_set_gpe_wake_mask(u8 action); void acpi_ec_dispatch_gpe(void); int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 754d59f95500..403c4ff15349 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -940,6 +940,8 @@ static int lps0_device_attach(struct acpi_device *adev, acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", bitmask); + + acpi_ec_mark_gpe_for_wake(); } else { acpi_handle_debug(adev->handle, "_DSM function 0 evaluation failed\n"); @@ -968,16 +970,23 @@ static int acpi_s2idle_prepare(void) if (lps0_device_handle) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY); + + acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE); } if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); + /* Change the configuration of GPEs to avoid spurious wakeup. */ + acpi_enable_all_wakeup_gpes(); + acpi_os_wait_events_complete(); return 0; } static void acpi_s2idle_wake(void) { + if (!lps0_device_handle) + return; if (pm_debug_messages_on) lpi_check_constraints(); @@ -996,8 +1005,7 @@ static void acpi_s2idle_wake(void) * takes too much time for EC wakeup events to survive, so look * for them now. 
*/ - if (lps0_device_handle) - acpi_ec_dispatch_gpe(); + acpi_ec_dispatch_gpe(); } } @@ -1017,10 +1025,14 @@ static void acpi_s2idle_sync(void) static void acpi_s2idle_restore(void) { + acpi_enable_all_runtime_gpes(); + if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); if (lps0_device_handle) { + acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE); + acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT); acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON); } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 7f38a92b444a..500de1dee967 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd) static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} #endif +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct generic_pm_domain_data *pd_data; + struct pm_domain_data *pdd; + struct gpd_link *link; + + /* New requested state is same as Max requested state */ + if (state == genpd->performance_state) + return state; + + /* New requested state is higher than Max requested state */ + if (state > genpd->performance_state) + return state; + + /* Traverse all devices within the domain */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + pd_data = to_gpd_data(pdd); + + if (pd_data->performance_state > state) + state = pd_data->performance_state; + } + + /* + * Traverse all sub-domains within the domain. This can be + * done without any additional locking as the link->performance_state + * field is protected by the master genpd->lock, which is already taken. + * + * Also note that link->performance_state (subdomain's performance state + * requirement to master domain) is different from + * link->slave->performance_state (current performance state requirement + * of the devices/sub-domains of the subdomain) and so can have a + * different value. + * + * Note that we also take vote from powered-off sub-domains into account + * as the same is done for devices right now. 
+ */ + list_for_each_entry(link, &genpd->master_links, master_node) { + if (link->performance_state > state) + state = link->performance_state; + } + + return state; +} + +static int _genpd_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state, int depth) +{ + struct generic_pm_domain *master; + struct gpd_link *link; + int master_state, ret; + + if (state == genpd->performance_state) + return 0; + + /* Propagate to masters of genpd */ + list_for_each_entry(link, &genpd->slave_links, slave_node) { + master = link->master; + + if (!master->set_performance_state) + continue; + + /* Find master's performance state */ + ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, + master->opp_table, + state); + if (unlikely(ret < 0)) + goto err; + + master_state = ret; + + genpd_lock_nested(master, depth + 1); + + link->prev_performance_state = link->performance_state; + link->performance_state = master_state; + master_state = _genpd_reeval_performance_state(master, + master_state); + ret = _genpd_set_performance_state(master, master_state, depth + 1); + if (ret) + link->performance_state = link->prev_performance_state; + + genpd_unlock(master); + + if (ret) + goto err; + } + + ret = genpd->set_performance_state(genpd, state); + if (ret) + goto err; + + genpd->performance_state = state; + return 0; + +err: + /* Encountered an error, lets rollback */ + list_for_each_entry_continue_reverse(link, &genpd->slave_links, + slave_node) { + master = link->master; + + if (!master->set_performance_state) + continue; + + genpd_lock_nested(master, depth + 1); + + master_state = link->prev_performance_state; + link->performance_state = master_state; + + master_state = _genpd_reeval_performance_state(master, + master_state); + if (_genpd_set_performance_state(master, master_state, depth + 1)) { + pr_err("%s: Failed to roll back to %d performance state\n", + master->name, master_state); + } + + genpd_unlock(master); + } + + return ret; +} + /** * dev_pm_genpd_set_performance_state- Set performance state of device's power * domain. @@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data, *pd_data; - struct pm_domain_data *pdd; + struct generic_pm_domain_data *gpd_data; unsigned int prev; - int ret = 0; + int ret; genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) @@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) prev = gpd_data->performance_state; gpd_data->performance_state = state; - /* New requested state is same as Max requested state */ - if (state == genpd->performance_state) - goto unlock; - - /* New requested state is higher than Max requested state */ - if (state > genpd->performance_state) - goto update_state; - - /* Traverse all devices within the domain */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - pd_data = to_gpd_data(pdd); - - if (pd_data->performance_state > state) - state = pd_data->performance_state; - } - - if (state == genpd->performance_state) - goto unlock; - - /* - * We aren't propagating performance state changes of a subdomain to its - * masters as we don't have hardware that needs it. Over that, the - * performance states of subdomain and its masters may not have - * one-to-one mapping and would require additional information. We can - * get back to this once we have hardware that needs it. 
For that - * reason, we don't have to consider performance state of the subdomains - * of genpd here. - */ - -update_state: - if (genpd_status_on(genpd)) { - ret = genpd->set_performance_state(genpd, state); - if (ret) { - gpd_data->performance_state = prev; - goto unlock; - } - } - - genpd->performance_state = state; + state = _genpd_reeval_performance_state(genpd, state); + ret = _genpd_set_performance_state(genpd, state, 0); + if (ret) + gpd_data->performance_state = prev; -unlock: genpd_unlock(genpd); return ret; @@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) return ret; elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); - - if (unlikely(genpd->set_performance_state)) { - ret = genpd->set_performance_state(genpd, genpd->performance_state); - if (ret) { - pr_warn("%s: Failed to set performance state %d (%d)\n", - genpd->name, genpd->performance_state, ret); - } - } - if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) return ret; @@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np, ret); goto unlock; } + + /* + * Save table for faster processing while setting performance + * state. + */ + genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); + WARN_ON(!genpd->opp_table); } ret = genpd_add_provider(np, genpd_xlate_simple, genpd); if (ret) { - if (genpd->set_performance_state) + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); dev_pm_opp_of_remove_table(&genpd->dev); + } goto unlock; } @@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np, i, ret); goto error; } + + /* + * Save table for faster processing while setting + * performance state. + */ + genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); + WARN_ON(!genpd->opp_table); } genpd->provider = &np->fwnode; @@ -1989,8 +2080,10 @@ error: genpd->provider = NULL; genpd->has_provider = false; - if (genpd->set_performance_state) + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); dev_pm_opp_of_remove_table(&genpd->dev); + } } mutex_unlock(&gpd_list_lock); @@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np) if (!gpd->set_performance_state) continue; + dev_pm_opp_put_opp_table(gpd->opp_table); dev_pm_opp_of_remove_table(&gpd->dev); } } @@ -2338,7 +2432,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); struct device *genpd_dev_pm_attach_by_id(struct device *dev, unsigned int index) { - struct device *genpd_dev; + struct device *virt_dev; int num_domains; int ret; @@ -2352,31 +2446,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, return NULL; /* Allocate and register device on the genpd bus. */ - genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL); - if (!genpd_dev) + virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); + if (!virt_dev) return ERR_PTR(-ENOMEM); - dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev)); - genpd_dev->bus = &genpd_bus_type; - genpd_dev->release = genpd_release_dev; + dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); + virt_dev->bus = &genpd_bus_type; + virt_dev->release = genpd_release_dev; - ret = device_register(genpd_dev); + ret = device_register(virt_dev); if (ret) { - kfree(genpd_dev); + kfree(virt_dev); return ERR_PTR(ret); } /* Try to attach the device to the PM domain at the specified index. 
*/ - ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false); + ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false); if (ret < 1) { - device_unregister(genpd_dev); + device_unregister(virt_dev); return ret ? ERR_PTR(ret) : NULL; } - pm_runtime_enable(genpd_dev); - genpd_queue_power_off_work(dev_to_genpd(genpd_dev)); + pm_runtime_enable(virt_dev); + genpd_queue_power_off_work(dev_to_genpd(virt_dev)); - return genpd_dev; + return virt_dev; } EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); @@ -2521,52 +2615,36 @@ int of_genpd_parse_idle_states(struct device_node *dn, EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); /** - * of_genpd_opp_to_performance_state- Gets performance state of device's - * power domain corresponding to a DT node's "required-opps" property. + * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. * - * @dev: Device for which the performance-state needs to be found. - * @np: DT node where the "required-opps" property is present. This can be - * the device node itself (if it doesn't have an OPP table) or a node - * within the OPP table of a device (if device has an OPP table). + * @genpd_dev: Genpd's device for which the performance-state needs to be found. + * @opp: struct dev_pm_opp of the OPP for which we need to find performance + * state. * - * Returns performance state corresponding to the "required-opps" property of - * a DT node. This calls platform specific genpd->opp_to_performance_state() - * callback to translate power domain OPP to performance state. + * Returns performance state encoded in the OPP of the genpd. This calls + * platform specific genpd->opp_to_performance_state() callback to translate + * power domain OPP to performance state. * * Returns performance state on success and 0 on failure. 
*/ -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { - struct generic_pm_domain *genpd; - struct dev_pm_opp *opp; - int state = 0; + struct generic_pm_domain *genpd = NULL; + int state; - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return 0; + genpd = container_of(genpd_dev, struct generic_pm_domain, dev); - if (unlikely(!genpd->set_performance_state)) + if (unlikely(!genpd->opp_to_performance_state)) return 0; genpd_lock(genpd); - - opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np); - if (IS_ERR(opp)) { - dev_err(dev, "Failed to find required OPP: %ld\n", - PTR_ERR(opp)); - goto unlock; - } - state = genpd->opp_to_performance_state(genpd, opp); - dev_pm_opp_put(opp); - -unlock: genpd_unlock(genpd); return state; } -EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state); +EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); static int __init genpd_bus_init(void) { @@ -2671,7 +2749,7 @@ exit: return 0; } -static int genpd_summary_show(struct seq_file *s, void *data) +static int summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2694,7 +2772,7 @@ static int genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int genpd_status_show(struct seq_file *s, void *data) +static int status_show(struct seq_file *s, void *data) { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", @@ -2721,7 +2799,7 @@ exit: return ret; } -static int genpd_sub_domains_show(struct seq_file *s, void *data) +static int sub_domains_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct gpd_link *link; @@ -2738,7 +2816,7 @@ static int genpd_sub_domains_show(struct seq_file *s, void *data) return ret; } -static int genpd_idle_states_show(struct seq_file *s, void *data) +static int idle_states_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; unsigned int i; @@ -2767,7 +2845,7 @@ static int genpd_idle_states_show(struct seq_file *s, void *data) return ret; } -static int genpd_active_time_show(struct seq_file *s, void *data) +static int active_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; ktime_t delta = 0; @@ -2787,7 +2865,7 @@ static int genpd_active_time_show(struct seq_file *s, void *data) return ret; } -static int genpd_total_idle_time_show(struct seq_file *s, void *data) +static int total_idle_time_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; ktime_t delta = 0, total = 0; @@ -2815,7 +2893,7 @@ static int genpd_total_idle_time_show(struct seq_file *s, void *data) } -static int genpd_devices_show(struct seq_file *s, void *data) +static int devices_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; struct pm_domain_data *pm_data; @@ -2841,7 +2919,7 @@ static int genpd_devices_show(struct seq_file *s, void *data) return ret; } -static int genpd_perf_state_show(struct seq_file *s, void *data) +static int perf_state_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd = s->private; @@ -2854,37 +2932,14 @@ static int genpd_perf_state_show(struct seq_file *s, void *data) return 0; } -#define define_genpd_open_function(name) \ -static int genpd_##name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, genpd_##name##_show, inode->i_private); \ -} - 
-define_genpd_open_function(summary); -define_genpd_open_function(status); -define_genpd_open_function(sub_domains); -define_genpd_open_function(idle_states); -define_genpd_open_function(active_time); -define_genpd_open_function(total_idle_time); -define_genpd_open_function(devices); -define_genpd_open_function(perf_state); - -#define define_genpd_debugfs_fops(name) \ -static const struct file_operations genpd_##name##_fops = { \ - .open = genpd_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ -} - -define_genpd_debugfs_fops(summary); -define_genpd_debugfs_fops(status); -define_genpd_debugfs_fops(sub_domains); -define_genpd_debugfs_fops(idle_states); -define_genpd_debugfs_fops(active_time); -define_genpd_debugfs_fops(total_idle_time); -define_genpd_debugfs_fops(devices); -define_genpd_debugfs_fops(perf_state); +DEFINE_SHOW_ATTRIBUTE(summary); +DEFINE_SHOW_ATTRIBUTE(status); +DEFINE_SHOW_ATTRIBUTE(sub_domains); +DEFINE_SHOW_ATTRIBUTE(idle_states); +DEFINE_SHOW_ATTRIBUTE(active_time); +DEFINE_SHOW_ATTRIBUTE(total_idle_time); +DEFINE_SHOW_ATTRIBUTE(devices); +DEFINE_SHOW_ATTRIBUTE(perf_state); static int __init genpd_debug_init(void) { @@ -2897,7 +2952,7 @@ static int __init genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - genpd_debugfs_dir, NULL, &genpd_summary_fops); + genpd_debugfs_dir, NULL, &summary_fops); if (!d) return -ENOMEM; @@ -2907,20 +2962,20 @@ static int __init genpd_debug_init(void) return -ENOMEM; debugfs_create_file("current_state", 0444, - d, genpd, &genpd_status_fops); + d, genpd, &status_fops); debugfs_create_file("sub_domains", 0444, - d, genpd, &genpd_sub_domains_fops); + d, genpd, &sub_domains_fops); debugfs_create_file("idle_states", 0444, - d, genpd, &genpd_idle_states_fops); + d, genpd, &idle_states_fops); debugfs_create_file("active_time", 0444, - d, genpd, &genpd_active_time_fops); + d, genpd, &active_time_fops); debugfs_create_file("total_idle_time", 0444, - d, genpd, &genpd_total_idle_time_fops); + d, genpd, &total_idle_time_fops); debugfs_create_file("devices", 0444, - d, genpd, &genpd_devices_fops); + d, genpd, &devices_fops); if (genpd->set_performance_state) debugfs_create_file("perf_state", 0444, - d, genpd, &genpd_perf_state_fops); + d, genpd, &perf_state_fops); } return 0; diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index beb85c31f3fa..70624695b6d5 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -8,6 +8,8 @@ */ #include <linux/sched/mm.h> +#include <linux/ktime.h> +#include <linux/hrtimer.h> #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> @@ -93,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - del_timer(&dev->power.suspend_timer); + hrtimer_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } @@ -124,12 +126,11 @@ static void pm_runtime_cancel_pending(struct device *dev) * This function may be called either with or without dev->power.lock held. * Either way it can be racy, since power.last_busy may be updated at any time. 
*/ -unsigned long pm_runtime_autosuspend_expiration(struct device *dev) +u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; - long elapsed; - unsigned long last_busy; - unsigned long expires = 0; + u64 last_busy, expires = 0; + u64 now = ktime_to_ns(ktime_get()); if (!dev->power.use_autosuspend) goto out; @@ -139,19 +140,9 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) goto out; last_busy = READ_ONCE(dev->power.last_busy); - elapsed = jiffies - last_busy; - if (elapsed < 0) - goto out; /* jiffies has wrapped around. */ - /* - * If the autosuspend_delay is >= 1 second, align the timer by rounding - * up to the nearest second. - */ - expires = last_busy + msecs_to_jiffies(autosuspend_delay); - if (autosuspend_delay >= 1000) - expires = round_jiffies(expires); - expires += !expires; - if (elapsed >= expires - last_busy) + expires = last_busy + autosuspend_delay * NSEC_PER_MSEC; + if (expires <= now) expires = 0; /* Already expired. */ out: @@ -515,7 +506,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) /* If the autosuspend_delay time hasn't expired yet, reschedule. */ if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { - unsigned long expires = pm_runtime_autosuspend_expiration(dev); + u64 expires = pm_runtime_autosuspend_expiration(dev); if (expires != 0) { /* Pending requests need to be canceled. */ @@ -528,10 +519,20 @@ static int rpm_suspend(struct device *dev, int rpmflags) * expire; pm_suspend_timer_fn() will take care of the * rest. */ - if (!(dev->power.timer_expires && time_before_eq( - dev->power.timer_expires, expires))) { + if (!(dev->power.timer_expires && + dev->power.timer_expires <= expires)) { + /* + * We add a slack of 25% to gather wakeups + * without sacrificing the granularity. + */ + u64 slack = READ_ONCE(dev->power.autosuspend_delay) * + (NSEC_PER_MSEC >> 2); + dev->power.timer_expires = expires; - mod_timer(&dev->power.suspend_timer, expires); + hrtimer_start_range_ns(&dev->power.suspend_timer, + ns_to_ktime(expires), + slack, + HRTIMER_MODE_ABS); } dev->power.timer_autosuspends = 1; goto out; @@ -895,23 +896,25 @@ static void pm_runtime_work(struct work_struct *work) * * Check if the time is right and queue a suspend request. */ -static void pm_suspend_timer_fn(struct timer_list *t) +static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) { - struct device *dev = from_timer(dev, t, power.suspend_timer); + struct device *dev = container_of(timer, struct device, power.suspend_timer); unsigned long flags; - unsigned long expires; + u64 expires; spin_lock_irqsave(&dev->power.lock, flags); expires = dev->power.timer_expires; /* If 'expire' is after 'jiffies' we've been called too early. */ - if (expires > 0 && !time_after(expires, jiffies)) { + if (expires > 0 && expires < ktime_to_ns(ktime_get())) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); } spin_unlock_irqrestore(&dev->power.lock, flags); + + return HRTIMER_NORESTART; } /** @@ -922,6 +925,7 @@ static void pm_suspend_timer_fn(struct timer_list *t) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; + ktime_t expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -938,10 +942,10 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. 
*/ pm_runtime_cancel_pending(dev); - dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); - dev->power.timer_expires += !dev->power.timer_expires; + expires = ktime_add(ktime_get(), ms_to_ktime(delay)); + dev->power.timer_expires = ktime_to_ns(expires); dev->power.timer_autosuspends = 0; - mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); + hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); out: spin_unlock_irqrestore(&dev->power.lock, flags); @@ -1491,7 +1495,8 @@ void pm_runtime_init(struct device *dev) INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; - timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0); + hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + dev->power.suspend_timer.function = pm_suspend_timer_fn; init_waitqueue_head(&dev->power.wait_queue); } diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 4e1131ef85ae..688f10227793 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -114,6 +114,17 @@ config ARM_QCOM_CPUFREQ_KRYO If in doubt, say N. +config ARM_QCOM_CPUFREQ_HW + tristate "QCOM CPUFreq HW driver" + depends on ARCH_QCOM || COMPILE_TEST + help + Support for the CPUFreq HW driver. + Some QCOM chipsets have a HW engine to offload the steps + necessary for changing the frequency of the CPUs. Firmware loaded + in this engine exposes a programming interface to the OS. + The driver implements the cpufreq interface for this HW engine. + Say Y if you want to support CPUFreq HW. + config ARM_S3C_CPUFREQ bool help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index d5ee4562ed06..08c071be2491 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o +obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index dbf82f36d270..33c309a08c64 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -123,8 +123,6 @@ static void nforce2_write_pll(int pll) /* Now write the value in all 64 registers */ for (temp = 0; temp <= 0x3f; temp++) pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll); - - return; } /** @@ -438,4 +436,3 @@ static void __exit nforce2_exit(void) module_init(nforce2_init); module_exit(nforce2_exit); - diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index dd5440d3372d..80c5bf590acb 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <asm/io.h> #include <linux/uaccess.h> #include <asm/pal.h> @@ -28,7 +27,6 @@ MODULE_AUTHOR("Venkatesh Pallipadi"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); - struct cpufreq_acpi_io { struct acpi_processor_performance acpi_data; unsigned int resume; @@ -348,10 +346,7 @@ acpi_cpufreq_exit (void) pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); - return; } - late_initcall(acpi_cpufreq_init); 
module_exit(acpi_cpufreq_exit); - diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index d8c3595e9023..9fedf627e000 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -177,22 +177,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) /* scaling down? scale voltage after frequency */ if (new_freq < old_freq) { ret = regulator_set_voltage_tol(arm_reg, volt, 0); - if (ret) { + if (ret) dev_warn(cpu_dev, "failed to scale vddarm down: %d\n", ret); - ret = 0; - } ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0); - if (ret) { + if (ret) dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret); - ret = 0; - } if (!IS_ERR(pu_reg)) { ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0); - if (ret) { + if (ret) dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret); - ret = 0; - } } } @@ -411,9 +405,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) if (of_machine_is_compatible("fsl,imx6ul") || of_machine_is_compatible("fsl,imx6ull")) { ret = imx6ul_opp_check_speed_grading(cpu_dev); - if (ret == -EPROBE_DEFER) - return ret; if (ret) { + if (ret == -EPROBE_DEFER) + return ret; + dev_err(cpu_dev, "failed to read ocotp: %d\n", ret); return ret; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 9578312e43f2..106402b89961 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -830,6 +830,28 @@ skip_epp: wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); } +static void intel_pstate_hwp_force_min_perf(int cpu) +{ + u64 value; + int min_perf; + + value = all_cpu_data[cpu]->hwp_req_cached; + value &= ~GENMASK_ULL(31, 0); + min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached); + + /* Set hwp_max = hwp_min */ + value |= HWP_MAX_PERF(min_perf); + value |= HWP_MIN_PERF(min_perf); + + /* Set EPP/EPB to min */ + if (static_cpu_has(X86_FEATURE_HWP_EPP)) + value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); + else + intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE); + + wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); +} + static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) { struct cpudata *cpu_data = all_cpu_data[policy->cpu]; @@ -2084,10 +2106,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) pr_debug("CPU %d exiting\n", policy->cpu); intel_pstate_clear_update_util_hook(policy->cpu); - if (hwp_active) + if (hwp_active) { intel_pstate_hwp_save_state(policy); - else + intel_pstate_hwp_force_min_perf(policy->cpu); + } else { intel_cpufreq_stop_cpu(policy); + } } static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index be623dd7b9f2..1d32a863332d 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -411,6 +411,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode) pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); pfunc_vdnap0_complete = pmf_find_function(root, "slewing-done"); + of_node_put(root); if (pfunc_set_vdnap0 == NULL || pfunc_vdnap0_complete == NULL) { pr_err("Can't find required platform function\n"); diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index bf6519cf64bc..7e7ad3879c4e 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -253,18 +253,18 @@ static int init_powernv_pstates(void) if (of_property_read_u32(power_mgt, 
"ibm,pstate-min", &pstate_min)) { pr_warn("ibm,pstate-min node not found\n"); - return -ENODEV; + goto out; } if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) { pr_warn("ibm,pstate-max node not found\n"); - return -ENODEV; + goto out; } if (of_property_read_u32(power_mgt, "ibm,pstate-nominal", &pstate_nominal)) { pr_warn("ibm,pstate-nominal not found\n"); - return -ENODEV; + goto out; } if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo", @@ -293,14 +293,14 @@ next: pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); if (!pstate_ids) { pr_warn("ibm,pstate-ids not found\n"); - return -ENODEV; + goto out; } pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz", &len_freqs); if (!pstate_freqs) { pr_warn("ibm,pstate-frequencies-mhz not found\n"); - return -ENODEV; + goto out; } if (len_ids != len_freqs) { @@ -311,7 +311,7 @@ next: nr_pstates = min(len_ids, len_freqs) / sizeof(u32); if (!nr_pstates) { pr_warn("No PStates found\n"); - return -ENODEV; + goto out; } powernv_pstate_info.nr_pstates = nr_pstates; @@ -352,7 +352,12 @@ next: /* End of list marker entry */ powernv_freqs[i].frequency = CPUFREQ_TABLE_END; + + of_node_put(power_mgt); return 0; +out: + of_node_put(power_mgt); + return -ENODEV; } /* Returns the CPU frequency corresponding to the pstate_id. */ diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c new file mode 100644 index 000000000000..d83939a1b3d4 --- /dev/null +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include <linux/bitfield.h> +#include <linux/cpufreq.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/slab.h> + +#define LUT_MAX_ENTRIES 40U +#define LUT_SRC GENMASK(31, 30) +#define LUT_L_VAL GENMASK(7, 0) +#define LUT_CORE_COUNT GENMASK(18, 16) +#define LUT_ROW_SIZE 32 +#define CLK_HW_DIV 2 + +/* Register offsets */ +#define REG_ENABLE 0x0 +#define REG_LUT_TABLE 0x110 +#define REG_PERF_STATE 0x920 + +static unsigned long cpu_hw_rate, xo_rate; +static struct platform_device *global_pdev; + +static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy, + unsigned int index) +{ + void __iomem *perf_state_reg = policy->driver_data; + + writel_relaxed(index, perf_state_reg); + + return 0; +} + +static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) +{ + void __iomem *perf_state_reg; + struct cpufreq_policy *policy; + unsigned int index; + + policy = cpufreq_cpu_get_raw(cpu); + if (!policy) + return 0; + + perf_state_reg = policy->driver_data; + + index = readl_relaxed(perf_state_reg); + index = min(index, LUT_MAX_ENTRIES - 1); + + return policy->freq_table[index].frequency; +} + +static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + void __iomem *perf_state_reg = policy->driver_data; + int index; + + index = policy->cached_resolved_idx; + if (index < 0) + return 0; + + writel_relaxed(index, perf_state_reg); + + return policy->freq_table[index].frequency; +} + +static int qcom_cpufreq_hw_read_lut(struct device *dev, + struct cpufreq_policy *policy, + void __iomem *base) +{ + u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; + unsigned int max_cores = cpumask_weight(policy->cpus); + struct cpufreq_frequency_table *table; + + table = kcalloc(LUT_MAX_ENTRIES + 1, 
sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (i = 0; i < LUT_MAX_ENTRIES; i++) { + data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE); + src = FIELD_GET(LUT_SRC, data); + lval = FIELD_GET(LUT_L_VAL, data); + core_count = FIELD_GET(LUT_CORE_COUNT, data); + + if (src) + freq = xo_rate * lval / 1000; + else + freq = cpu_hw_rate / 1000; + + /* Ignore boosts in the middle of the table */ + if (core_count != max_cores) { + table[i].frequency = CPUFREQ_ENTRY_INVALID; + } else { + table[i].frequency = freq; + dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i, + freq, core_count); + } + + /* + * Two of the same frequencies with the same core counts means + * end of table + */ + if (i > 0 && prev_freq == freq && prev_cc == core_count) { + struct cpufreq_frequency_table *prev = &table[i - 1]; + + /* + * Only treat the last frequency that might be a boost + * as the boost frequency + */ + if (prev_cc != max_cores) { + prev->frequency = prev_freq; + prev->flags = CPUFREQ_BOOST_FREQ; + } + + break; + } + + prev_cc = core_count; + prev_freq = freq; + } + + table[i].frequency = CPUFREQ_TABLE_END; + policy->freq_table = table; + + return 0; +} + +static void qcom_get_related_cpus(int index, struct cpumask *m) +{ + struct device_node *cpu_np; + struct of_phandle_args args; + int cpu, ret; + + for_each_possible_cpu(cpu) { + cpu_np = of_cpu_device_node_get(cpu); + if (!cpu_np) + continue; + + ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain", + "#freq-domain-cells", 0, + &args); + of_node_put(cpu_np); + if (ret < 0) + continue; + + if (index == args.args[0]) + cpumask_set_cpu(cpu, m); + } +} + +static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) +{ + struct device *dev = &global_pdev->dev; + struct of_phandle_args args; + struct device_node *cpu_np; + struct resource *res; + void __iomem *base; + int ret, index; + + cpu_np = of_cpu_device_node_get(policy->cpu); + if (!cpu_np) + return -EINVAL; + + ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain", + "#freq-domain-cells", 0, &args); + of_node_put(cpu_np); + if (ret) + return ret; + + index = args.args[0]; + + res = platform_get_resource(global_pdev, IORESOURCE_MEM, index); + if (!res) + return -ENODEV; + + base = devm_ioremap(dev, res->start, resource_size(res)); + if (!base) + return -ENOMEM; + + /* HW should be in enabled state to proceed */ + if (!(readl_relaxed(base + REG_ENABLE) & 0x1)) { + dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index); + ret = -ENODEV; + goto error; + } + + qcom_get_related_cpus(index, policy->cpus); + if (!cpumask_weight(policy->cpus)) { + dev_err(dev, "Domain-%d failed to get related CPUs\n", index); + ret = -ENOENT; + goto error; + } + + policy->driver_data = base + REG_PERF_STATE; + + ret = qcom_cpufreq_hw_read_lut(dev, policy, base); + if (ret) { + dev_err(dev, "Domain-%d failed to read LUT\n", index); + goto error; + } + + policy->fast_switch_possible = true; + + return 0; +error: + devm_iounmap(dev, base); + return ret; +} + +static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) +{ + void __iomem *base = policy->driver_data - REG_PERF_STATE; + + kfree(policy->freq_table); + devm_iounmap(&global_pdev->dev, base); + + return 0; +} + +static struct freq_attr *qcom_cpufreq_hw_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + &cpufreq_freq_attr_scaling_boost_freqs, + NULL +}; + +static struct cpufreq_driver cpufreq_qcom_hw_driver = { + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + 
CPUFREQ_HAVE_GOVERNOR_PER_POLICY, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = qcom_cpufreq_hw_target_index, + .get = qcom_cpufreq_hw_get, + .init = qcom_cpufreq_hw_cpu_init, + .exit = qcom_cpufreq_hw_cpu_exit, + .fast_switch = qcom_cpufreq_hw_fast_switch, + .name = "qcom-cpufreq-hw", + .attr = qcom_cpufreq_hw_attr, +}; + +static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev) +{ + struct clk *clk; + int ret; + + clk = clk_get(&pdev->dev, "xo"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + xo_rate = clk_get_rate(clk); + clk_put(clk); + + clk = clk_get(&pdev->dev, "alternate"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV; + clk_put(clk); + + global_pdev = pdev; + + ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver); + if (ret) + dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n"); + else + dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n"); + + return ret; +} + +static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev) +{ + return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver); +} + +static const struct of_device_id qcom_cpufreq_hw_match[] = { + { .compatible = "qcom,cpufreq-hw" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match); + +static struct platform_driver qcom_cpufreq_hw_driver = { + .probe = qcom_cpufreq_hw_driver_probe, + .remove = qcom_cpufreq_hw_driver_remove, + .driver = { + .name = "qcom-cpufreq-hw", + .of_match_table = qcom_cpufreq_hw_match, + }, +}; + +static int __init qcom_cpufreq_hw_init(void) +{ + return platform_driver_register(&qcom_cpufreq_hw_driver); +} +subsys_initcall(qcom_cpufreq_hw_init); + +static void __exit qcom_cpufreq_hw_exit(void) +{ + platform_driver_unregister(&qcom_cpufreq_hw_driver); +} +module_exit(qcom_cpufreq_hw_exit); + +MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c index 4d976e8dbb2f..0df87b6480fe 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c +++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c @@ -63,18 +63,7 @@ static int board_show(struct seq_file *seq, void *p) return 0; } -static int fops_board_open(struct inode *inode, struct file *file) -{ - return single_open(file, board_show, NULL); -} - -static const struct file_operations fops_board = { - .open = fops_board_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(board); static int info_show(struct seq_file *seq, void *p) { @@ -105,18 +94,7 @@ static int info_show(struct seq_file *seq, void *p) return 0; } -static int fops_info_open(struct inode *inode, struct file *file) -{ - return single_open(file, info_show, NULL); -} - -static const struct file_operations fops_info = { - .open = fops_info_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(info); static int io_show(struct seq_file *seq, void *p) { @@ -162,19 +140,7 @@ static int io_show(struct seq_file *seq, void *p) return 0; } -static int fops_io_open(struct inode *inode, struct file *file) -{ - return single_open(file, io_show, NULL); -} - -static const struct file_operations fops_io = { - .open = fops_io_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; - +DEFINE_SHOW_ATTRIBUTE(io); static int __init s3c_freq_debugfs_init(void) { @@ -185,13 +151,13 @@ static int 
__init s3c_freq_debugfs_init(void) } dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root, - NULL, &fops_io); + NULL, &io_fops); dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root, - NULL, &fops_info); + NULL, &info_fops); dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root, - NULL, &fops_board); + NULL, &board_fops); return 0; } diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index db2ede565f1a..b44476a1b7ad 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -167,6 +167,7 @@ static int __init bl_idle_init(void) { int ret; struct device_node *root = of_find_node_by_path("/"); + const struct of_device_id *match_id; if (!root) return -ENODEV; @@ -174,7 +175,11 @@ static int __init bl_idle_init(void) /* * Initialize the driver just for a compliant set of machines */ - if (!of_match_node(compatible_machine_match, root)) + match_id = of_match_node(compatible_machine_match, root); + + of_node_put(root); + + if (!match_id) return -ENODEV; if (!mcpm_is_available()) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 4a97446f66d8..7f108309e871 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -202,7 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, struct cpuidle_state *target_state = &drv->states[index]; bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); ktime_t time_start, time_end; - s64 diff; /* * Tell the time framework to switch to a broadcast timer because our @@ -248,6 +247,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, local_irq_enable(); if (entered_state >= 0) { + s64 diff, delay = drv->states[entered_state].exit_latency; + int i; + /* * Update cpuidle counters * This can be moved to within driver enter routine, @@ -260,6 +262,33 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, dev->last_residency = (int)diff; dev->states_usage[entered_state].time += dev->last_residency; dev->states_usage[entered_state].usage++; + + if (diff < drv->states[entered_state].target_residency) { + for (i = entered_state - 1; i >= 0; i--) { + if (drv->states[i].disabled || + dev->states_usage[i].disable) + continue; + + /* Shallower states are enabled, so update. */ + dev->states_usage[entered_state].above++; + break; + } + } else if (diff > delay) { + for (i = entered_state + 1; i < drv->state_count; i++) { + if (drv->states[i].disabled || + dev->states_usage[i].disable) + continue; + + /* + * Update if a deeper state would have been a + * better match for the observed idle duration. 
+ */ + if (diff - delay >= drv->states[i].target_residency) + dev->states_usage[entered_state].below++; + + break; + } + } } else { dev->last_residency = 0; } @@ -702,4 +731,5 @@ static int __init cpuidle_init(void) } module_param(off, int, 0444); +module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444); core_initcall(cpuidle_init); diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 2965ab32a583..d6613101af92 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -7,6 +7,7 @@ #define __DRIVER_CPUIDLE_H /* For internal use only */ +extern char param_governor[]; extern struct cpuidle_governor *cpuidle_curr_governor; extern struct list_head cpuidle_governors; extern struct list_head cpuidle_detected_devices; diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 9fed1b829292..bb93e5cf6a4a 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -11,10 +11,13 @@ #include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/mutex.h> +#include <linux/module.h> #include <linux/pm_qos.h> #include "cpuidle.h" +char param_governor[CPUIDLE_NAME_LEN]; + LIST_HEAD(cpuidle_governors); struct cpuidle_governor *cpuidle_curr_governor; @@ -86,9 +89,11 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) mutex_lock(&cpuidle_lock); if (__cpuidle_find_governor(gov->name) == NULL) { ret = 0; - list_add_tail(&gov->governor_list, &cpuidle_governors); if (!cpuidle_curr_governor || - cpuidle_curr_governor->rating < gov->rating) + !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) || + (cpuidle_curr_governor->rating < gov->rating && + strncasecmp(param_governor, cpuidle_curr_governor->name, + CPUIDLE_NAME_LEN))) cpuidle_switch_governor(gov); } mutex_unlock(&cpuidle_lock); diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index 85792d371add..b17d153e724f 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -20,8 +20,17 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, local_irq_enable(); if (!current_set_polling_and_test()) { - u64 limit = (u64)drv->states[1].target_residency * NSEC_PER_USEC; unsigned int loop_count = 0; + u64 limit = TICK_USEC; + int i; + + for (i = 1; i < drv->state_count; i++) { + if (drv->states[i].disabled || dev->states_usage[i].disable) + continue; + + limit = (u64)drv->states[i].target_residency * NSEC_PER_USEC; + break; + } while (!need_resched()) { cpu_relax(); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index e754c7aae7f7..eb20adb5de23 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -301,6 +301,8 @@ define_show_state_str_function(name) define_show_state_str_function(desc) define_show_state_ull_function(disable) define_store_state_ull_function(disable) +define_show_state_ull_function(above) +define_show_state_ull_function(below) define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); @@ -310,6 +312,8 @@ define_one_state_ro(power, show_state_power_usage); define_one_state_ro(usage, show_state_usage); define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); +define_one_state_ro(above, show_state_above); +define_one_state_ro(below, show_state_below); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -320,6 +324,8 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_usage.attr, &attr_time.attr, &attr_disable.attr, + &attr_above.attr, + 
&attr_below.attr, NULL }; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 141413067b5c..0ae3de76833b 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -285,6 +285,44 @@ static int devfreq_notify_transition(struct devfreq *devfreq, return 0; } +static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq, + u32 flags) +{ + struct devfreq_freqs freqs; + unsigned long cur_freq; + int err = 0; + + if (devfreq->profile->get_cur_freq) + devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq); + else + cur_freq = devfreq->previous_freq; + + freqs.old = cur_freq; + freqs.new = new_freq; + devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); + + err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags); + if (err) { + freqs.new = cur_freq; + devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); + return err; + } + + freqs.new = new_freq; + devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); + + if (devfreq_update_status(devfreq, new_freq)) + dev_err(&devfreq->dev, + "Couldn't update frequency transition information.\n"); + + devfreq->previous_freq = new_freq; + + if (devfreq->suspend_freq) + devfreq->resume_freq = cur_freq; + + return err; +} + /* Load monitoring helper functions for governors use */ /** @@ -296,8 +334,7 @@ static int devfreq_notify_transition(struct devfreq *devfreq, */ int update_devfreq(struct devfreq *devfreq) { - struct devfreq_freqs freqs; - unsigned long freq, cur_freq, min_freq, max_freq; + unsigned long freq, min_freq, max_freq; int err = 0; u32 flags = 0; @@ -333,31 +370,8 @@ int update_devfreq(struct devfreq *devfreq) flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */ } - if (devfreq->profile->get_cur_freq) - devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq); - else - cur_freq = devfreq->previous_freq; + return devfreq_set_target(devfreq, freq, flags); - freqs.old = cur_freq; - freqs.new = freq; - devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); - - err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); - if (err) { - freqs.new = cur_freq; - devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); - return err; - } - - freqs.new = freq; - devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); - - if (devfreq_update_status(devfreq, freq)) - dev_err(&devfreq->dev, - "Couldn't update frequency transition information.\n"); - - devfreq->previous_freq = freq; - return err; } EXPORT_SYMBOL(update_devfreq); @@ -657,6 +671,9 @@ struct devfreq *devfreq_add_device(struct device *dev, } devfreq->max_freq = devfreq->scaling_max_freq; + devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); + atomic_set(&devfreq->suspend_count, 0); + dev_set_name(&devfreq->dev, "devfreq%d", atomic_inc_return(&devfreq_no)); err = device_register(&devfreq->dev); @@ -857,14 +874,28 @@ EXPORT_SYMBOL(devm_devfreq_remove_device); */ int devfreq_suspend_device(struct devfreq *devfreq) { + int ret; + if (!devfreq) return -EINVAL; - if (!devfreq->governor) + if (atomic_inc_return(&devfreq->suspend_count) > 1) return 0; - return devfreq->governor->event_handler(devfreq, - DEVFREQ_GOV_SUSPEND, NULL); + if (devfreq->governor) { + ret = devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_SUSPEND, NULL); + if (ret) + return ret; + } + + if (devfreq->suspend_freq) { + ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0); + if (ret) + return ret; + } + + return 0; } EXPORT_SYMBOL(devfreq_suspend_device); @@ -878,18 +909,76 @@ 
EXPORT_SYMBOL(devfreq_suspend_device); */ int devfreq_resume_device(struct devfreq *devfreq) { + int ret; + if (!devfreq) return -EINVAL; - if (!devfreq->governor) + if (atomic_dec_return(&devfreq->suspend_count) >= 1) return 0; - return devfreq->governor->event_handler(devfreq, - DEVFREQ_GOV_RESUME, NULL); + if (devfreq->resume_freq) { + ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0); + if (ret) + return ret; + } + + if (devfreq->governor) { + ret = devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_RESUME, NULL); + if (ret) + return ret; + } + + return 0; } EXPORT_SYMBOL(devfreq_resume_device); /** + * devfreq_suspend() - Suspend devfreq governors and devices + * + * Called during system wide Suspend/Hibernate cycles for suspending governors + * and devices preserving the state for resume. On some platforms the devfreq + * device must have precise state (frequency) after resume in order to provide + * fully operating setup. + */ +void devfreq_suspend(void) +{ + struct devfreq *devfreq; + int ret; + + mutex_lock(&devfreq_list_lock); + list_for_each_entry(devfreq, &devfreq_list, node) { + ret = devfreq_suspend_device(devfreq); + if (ret) + dev_err(&devfreq->dev, + "failed to suspend devfreq device\n"); + } + mutex_unlock(&devfreq_list_lock); +} + +/** + * devfreq_resume() - Resume devfreq governors and devices + * + * Called during system wide Suspend/Hibernate cycle for resuming governors and + * devices that are suspended with devfreq_suspend(). + */ +void devfreq_resume(void) +{ + struct devfreq *devfreq; + int ret; + + mutex_lock(&devfreq_list_lock); + list_for_each_entry(devfreq, &devfreq_list, node) { + ret = devfreq_resume_device(devfreq); + if (ret) + dev_warn(&devfreq->dev, + "failed to resume devfreq device\n"); + } + mutex_unlock(&devfreq_list_lock); +} + +/** * devfreq_add_governor() - Add devfreq governor * @governor: the devfreq governor to be added */ diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 2c2df4e4fc14..e5507add8f04 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -196,12 +196,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) if (IS_ERR(opp_table)) return 0; - count = opp_table->regulator_count; - /* Regulator may not be required for the device */ - if (!count) + if (!opp_table->regulators) goto put_opp_table; + count = opp_table->regulator_count; + uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) goto put_opp_table; @@ -548,44 +548,6 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk, return ret; } -static inline int -_generic_set_opp_domain(struct device *dev, struct clk *clk, - unsigned long old_freq, unsigned long freq, - unsigned int old_pstate, unsigned int new_pstate) -{ - int ret; - - /* Scaling up? Scale domain performance state before frequency */ - if (freq > old_freq) { - ret = dev_pm_genpd_set_performance_state(dev, new_pstate); - if (ret) - return ret; - } - - ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); - if (ret) - goto restore_domain_state; - - /* Scaling down? 
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 2c2df4e4fc14..e5507add8f04 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -196,12 +196,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	if (IS_ERR(opp_table))
 		return 0;
 
-	count = opp_table->regulator_count;
-
 	/* Regulator may not be required for the device */
-	if (!count)
+	if (!opp_table->regulators)
 		goto put_opp_table;
 
+	count = opp_table->regulator_count;
+
 	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
 	if (!uV)
 		goto put_opp_table;
@@ -548,44 +548,6 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
 	return ret;
 }
 
-static inline int
-_generic_set_opp_domain(struct device *dev, struct clk *clk,
-			unsigned long old_freq, unsigned long freq,
-			unsigned int old_pstate, unsigned int new_pstate)
-{
-	int ret;
-
-	/* Scaling up? Scale domain performance state before frequency */
-	if (freq > old_freq) {
-		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
-		if (ret)
-			return ret;
-	}
-
-	ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
-	if (ret)
-		goto restore_domain_state;
-
-	/* Scaling down? Scale domain performance state after frequency */
-	if (freq < old_freq) {
-		ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
-		if (ret)
-			goto restore_freq;
-	}
-
-	return 0;
-
-restore_freq:
-	if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
-		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
-			__func__, old_freq);
-restore_domain_state:
-	if (freq > old_freq)
-		dev_pm_genpd_set_performance_state(dev, old_pstate);
-
-	return ret;
-}
-
 static int _generic_set_opp_regulator(const struct opp_table *opp_table,
				      struct device *dev,
				      unsigned long old_freq,
@@ -635,6 +597,84 @@ restore_voltage:
 	return ret;
 }
 
+static int _set_opp_custom(const struct opp_table *opp_table,
+			   struct device *dev, unsigned long old_freq,
+			   unsigned long freq,
+			   struct dev_pm_opp_supply *old_supply,
+			   struct dev_pm_opp_supply *new_supply)
+{
+	struct dev_pm_set_opp_data *data;
+	int size;
+
+	data = opp_table->set_opp_data;
+	data->regulators = opp_table->regulators;
+	data->regulator_count = opp_table->regulator_count;
+	data->clk = opp_table->clk;
+	data->dev = dev;
+
+	data->old_opp.rate = old_freq;
+	size = sizeof(*old_supply) * opp_table->regulator_count;
+	if (IS_ERR(old_supply))
+		memset(data->old_opp.supplies, 0, size);
+	else
+		memcpy(data->old_opp.supplies, old_supply, size);
+
+	data->new_opp.rate = freq;
+	memcpy(data->new_opp.supplies, new_supply, size);
+
+	return opp_table->set_opp(data);
+}
+
+/* This is only called for PM domain for now */
+static int _set_required_opps(struct device *dev,
+			      struct opp_table *opp_table,
+			      struct dev_pm_opp *opp)
+{
+	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+	struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
+	unsigned int pstate;
+	int i, ret = 0;
+
+	if (!required_opp_tables)
+		return 0;
+
+	/* Single genpd case */
+	if (!genpd_virt_devs) {
+		pstate = opp->required_opps[0]->pstate;
+		ret = dev_pm_genpd_set_performance_state(dev, pstate);
+		if (ret) {
+			dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
+				dev_name(dev), pstate, ret);
+		}
+		return ret;
+	}
+
+	/* Multiple genpd case */
+
+	/*
+	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
+	 * after it is freed from another thread.
+	 */
+	mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+	for (i = 0; i < opp_table->required_opp_count; i++) {
+		pstate = opp->required_opps[i]->pstate;
+
+		if (!genpd_virt_devs[i])
+			continue;
+
+		ret = dev_pm_genpd_set_performance_state(genpd_virt_devs[i],
+							 pstate);
+		if (ret) {
+			dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+				dev_name(genpd_virt_devs[i]), pstate, ret);
+			break;
+		}
+	}
+	mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+	return ret;
+}
+
 /**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
@@ -649,7 +689,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 	unsigned long freq, old_freq;
 	struct dev_pm_opp *old_opp, *opp;
 	struct clk *clk;
-	int ret, size;
+	int ret;
 
 	if (unlikely(!target_freq)) {
 		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
@@ -702,44 +742,34 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
 		old_freq, freq);
 
-	/* Only frequency scaling */
-	if (!opp_table->regulators) {
-		/*
-		 * We don't support devices with both regulator and
-		 * domain performance-state for now.
-		 */
-		if (opp_table->genpd_performance_state)
-			ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
-						      IS_ERR(old_opp) ? 0 : old_opp->pstate,
-						      opp->pstate);
-		else
-			ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
-	} else if (!opp_table->set_opp) {
+	/* Scaling up? Configure required OPPs before frequency */
+	if (freq > old_freq) {
+		ret = _set_required_opps(dev, opp_table, opp);
+		if (ret)
+			goto put_opp;
+	}
+
+	if (opp_table->set_opp) {
+		ret = _set_opp_custom(opp_table, dev, old_freq, freq,
+				      IS_ERR(old_opp) ? NULL : old_opp->supplies,
+				      opp->supplies);
+	} else if (opp_table->regulators) {
 		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
						 opp->supplies);
 	} else {
-		struct dev_pm_set_opp_data *data;
-
-		data = opp_table->set_opp_data;
-		data->regulators = opp_table->regulators;
-		data->regulator_count = opp_table->regulator_count;
-		data->clk = clk;
-		data->dev = dev;
-
-		data->old_opp.rate = old_freq;
-		size = sizeof(*opp->supplies) * opp_table->regulator_count;
-		if (IS_ERR(old_opp))
-			memset(data->old_opp.supplies, 0, size);
-		else
-			memcpy(data->old_opp.supplies, old_opp->supplies, size);
-
-		data->new_opp.rate = freq;
-		memcpy(data->new_opp.supplies, opp->supplies, size);
+		/* Only frequency scaling */
+		ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+	}
 
-		ret = opp_table->set_opp(data);
+	/* Scaling down? Configure required OPPs after frequency */
+	if (!ret && freq < old_freq) {
+		ret = _set_required_opps(dev, opp_table, opp);
+		if (ret)
+			dev_err(dev, "Failed to set required opps: %d\n", ret);
 	}
 
+put_opp:
 	dev_pm_opp_put(opp);
 
put_old_opp:
 	if (!IS_ERR(old_opp))
@@ -810,8 +840,12 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
 		return NULL;
 
 	mutex_init(&opp_table->lock);
+	mutex_init(&opp_table->genpd_virt_dev_lock);
 	INIT_LIST_HEAD(&opp_table->dev_list);
 
+	/* Mark regulator count uninitialized */
+	opp_table->regulator_count = -1;
+
 	opp_dev = _add_opp_dev(dev, opp_table);
 	if (!opp_dev) {
 		kfree(opp_table);
@@ -888,6 +922,8 @@ static void _opp_table_kref_release(struct kref *kref)
 	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
 	struct opp_device *opp_dev, *temp;
 
+	_of_clear_opp_table(opp_table);
+
 	/* Release clk */
 	if (!IS_ERR(opp_table->clk))
 		clk_put(opp_table->clk);
@@ -905,6 +941,7 @@ static void _opp_table_kref_release(struct kref *kref)
 		_remove_opp_dev(opp_dev, opp_table);
 	}
 
+	mutex_destroy(&opp_table->genpd_virt_dev_lock);
 	mutex_destroy(&opp_table->lock);
 	list_del(&opp_table->node);
 	kfree(opp_table);
@@ -961,6 +998,7 @@ static void _opp_kref_release(struct kref *kref)
	 * frequency/voltage list.
	 */
 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
+	_of_opp_free_required_opps(opp_table, opp);
 	opp_debug_remove_one(opp);
 	list_del(&opp->node);
 	kfree(opp);
@@ -1028,7 +1066,7 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *table)
 	int count, supply_size;
 
 	/* Allocate space for at least one supply */
-	count = table->regulator_count ? table->regulator_count : 1;
+	count = table->regulator_count > 0 ? table->regulator_count : 1;
 	supply_size = sizeof(*opp->supplies) * count;
 
 	/* allocate new OPP node and supplies structures */
@@ -1049,6 +1087,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
 	struct regulator *reg;
 	int i;
 
+	if (!opp_table->regulators)
+		return true;
+
 	for (i = 0; i < opp_table->regulator_count; i++) {
 		reg = opp_table->regulators[i];
 
@@ -1333,7 +1374,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table)
 	struct dev_pm_set_opp_data *data;
 	int len, count = opp_table->regulator_count;
 
-	if (WARN_ON(!count))
+	if (WARN_ON(!opp_table->regulators))
 		return -EINVAL;
 
 	/* space for set_opp_data */
@@ -1430,7 +1471,7 @@ free_regulators:
 	kfree(opp_table->regulators);
 	opp_table->regulators = NULL;
-	opp_table->regulator_count = 0;
+	opp_table->regulator_count = -1;
err:
 	dev_pm_opp_put_opp_table(opp_table);
 
@@ -1459,7 +1500,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
 
 	kfree(opp_table->regulators);
 	opp_table->regulators = NULL;
-	opp_table->regulator_count = 0;
+	opp_table->regulator_count = -1;
 
put_opp_table:
 	dev_pm_opp_put_opp_table(opp_table);
@@ -1587,6 +1628,155 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
 EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
 
 /**
+ * dev_pm_opp_set_genpd_virt_dev - Set virtual genpd device for an index
+ * @dev: Consumer device for which the genpd device is getting set.
+ * @virt_dev: virtual genpd device.
+ * @index: index.
+ *
+ * Multiple generic power domains for a device are supported with the help of
+ * virtual genpd devices, which are created for each consumer device - genpd
+ * pair. These are the device structures which are attached to the power domain
+ * and are required by the OPP core to set the performance state of the genpd.
+ *
+ * This helper will normally be called by the consumer driver of the device
+ * "dev", as only that has details of the genpd devices.
+ *
+ * This helper needs to be called once for each of those virtual devices, but
+ * only if multiple domains are available for a device. Otherwise the original
+ * device structure will be used instead by the OPP core.
+ */
+struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev,
+						struct device *virt_dev,
+						int index)
+{
+	struct opp_table *opp_table;
+
+	opp_table = dev_pm_opp_get_opp_table(dev);
+	if (!opp_table)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+	if (unlikely(!opp_table->genpd_virt_devs ||
+		     index >= opp_table->required_opp_count ||
+		     opp_table->genpd_virt_devs[index])) {
+
+		dev_err(dev, "Invalid request to set required device\n");
+		dev_pm_opp_put_opp_table(opp_table);
+		mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+		return ERR_PTR(-EINVAL);
+	}
+
+	opp_table->genpd_virt_devs[index] = virt_dev;
+	mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+	return opp_table;
+}
+
+/**
+ * dev_pm_opp_put_genpd_virt_dev() - Releases resources blocked for genpd device.
+ * @opp_table: OPP table returned by dev_pm_opp_set_genpd_virt_dev().
+ * @virt_dev: virtual genpd device.
+ *
+ * This releases the resource previously acquired with a call to
+ * dev_pm_opp_set_genpd_virt_dev(). The consumer driver shall call this helper
+ * if it doesn't want OPP core to update performance state of a power domain
+ * anymore.
+ */
+void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table,
+				   struct device *virt_dev)
+{
+	int i;
+
+	/*
+	 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
+	 * used in parallel.
+	 */
+	mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+	for (i = 0; i < opp_table->required_opp_count; i++) {
+		if (opp_table->genpd_virt_devs[i] != virt_dev)
+			continue;
+
+		opp_table->genpd_virt_devs[i] = NULL;
+		dev_pm_opp_put_opp_table(opp_table);
+
+		/* Drop the vote */
+		dev_pm_genpd_set_performance_state(virt_dev, 0);
+		break;
+	}
+
+	mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+	if (unlikely(i == opp_table->required_opp_count))
+		dev_err(virt_dev, "Failed to find required device entry\n");
+}
+
+/**
+ * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
+ * @src_table: OPP table which has dst_table as one of its required OPP table.
+ * @dst_table: Required OPP table of the src_table.
+ * @pstate: Current performance state of the src_table.
+ *
+ * This Returns pstate of the OPP (present in @dst_table) pointed out by the
+ * "required-opps" property of the OPP (present in @src_table) which has
+ * performance state set to @pstate.
+ *
+ * Return: Zero or positive performance state on success, otherwise negative
+ * value on errors.
+ */
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
+				       struct opp_table *dst_table,
+				       unsigned int pstate)
+{
+	struct dev_pm_opp *opp;
+	int dest_pstate = -EINVAL;
+	int i;
+
+	if (!pstate)
+		return 0;
+
+	/*
+	 * Normally the src_table will have the "required_opps" property set to
+	 * point to one of the OPPs in the dst_table, but in some cases the
+	 * genpd and its master have one to one mapping of performance states
+	 * and so none of them have the "required-opps" property set. Return the
+	 * pstate of the src_table as it is in such cases.
+	 */
+	if (!src_table->required_opp_count)
+		return pstate;
+
+	for (i = 0; i < src_table->required_opp_count; i++) {
+		if (src_table->required_opp_tables[i]->np == dst_table->np)
+			break;
+	}
+
+	if (unlikely(i == src_table->required_opp_count)) {
+		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
+		       __func__, src_table, dst_table);
+		return -EINVAL;
+	}
+
+	mutex_lock(&src_table->lock);
+
+	list_for_each_entry(opp, &src_table->opp_list, node) {
+		if (opp->pstate == pstate) {
+			dest_pstate = opp->required_opps[i]->pstate;
+			goto unlock;
+		}
+	}
+
+	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
+	       dst_table);
+
unlock:
+	mutex_unlock(&src_table->lock);
+
+	return dest_pstate;
+}
+
+/**
 * dev_pm_opp_add()  - Add an OPP table from a table definitions
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
@@ -1612,6 +1802,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	if (!opp_table)
 		return -ENOMEM;
 
+	/* Fix regulator count for dynamic OPPs */
+	opp_table->regulator_count = 1;
+
 	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
 	if (ret)
 		dev_pm_opp_put_opp_table(opp_table);
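[Usage note] For the multiple-genpd path added above, a consumer is expected to attach each power domain separately and hand the resulting virtual devices to the OPP core. A hedged sketch, not taken from this commit (the two-domain count, function name and simplified error handling are illustrative only):

#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>

#define MY_NUM_DOMAINS	2	/* made-up count for illustration */

static int my_attach_domains(struct device *dev)
{
	struct opp_table *opp_table;
	struct device *virt_dev;
	int index;

	for (index = 0; index < MY_NUM_DOMAINS; index++) {
		/* One virtual device per consumer/genpd pair */
		virt_dev = dev_pm_domain_attach_by_id(dev, index);
		if (IS_ERR(virt_dev))
			return PTR_ERR(virt_dev);

		/* Let the OPP core vote on this domain's performance state */
		opp_table = dev_pm_opp_set_genpd_virt_dev(dev, virt_dev, index);
		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);
	}

	return 0;
}

After this, a plain dev_pm_opp_set_rate() call propagates the required-opps performance states to all attached domains, before the clock change when scaling up and after it when scaling down.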
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 38a08805a30c..06f0f632ec47 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -73,6 +73,167 @@ struct opp_table *_managed_opp(struct device *dev, int index)
 	return managed_table;
 }
 
+/* The caller must call dev_pm_opp_put() after the OPP is used */
+static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
+					  struct device_node *opp_np)
+{
+	struct dev_pm_opp *opp;
+
+	lockdep_assert_held(&opp_table_lock);
+
+	mutex_lock(&opp_table->lock);
+
+	list_for_each_entry(opp, &opp_table->opp_list, node) {
+		if (opp->np == opp_np) {
+			dev_pm_opp_get(opp);
+			mutex_unlock(&opp_table->lock);
+			return opp;
+		}
+	}
+
+	mutex_unlock(&opp_table->lock);
+
+	return NULL;
+}
+
+static struct device_node *of_parse_required_opp(struct device_node *np,
+						 int index)
+{
+	struct device_node *required_np;
+
+	required_np = of_parse_phandle(np, "required-opps", index);
+	if (unlikely(!required_np)) {
+		pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
+		       __func__, np, index);
+	}
+
+	return required_np;
+}
+
+/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
+static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
+{
+	struct opp_table *opp_table;
+	struct device_node *opp_table_np;
+
+	lockdep_assert_held(&opp_table_lock);
+
+	opp_table_np = of_get_parent(opp_np);
+	if (!opp_table_np)
+		goto err;
+
+	/* It is safe to put the node now as all we need now is its address */
+	of_node_put(opp_table_np);
+
+	list_for_each_entry(opp_table, &opp_tables, node) {
+		if (opp_table_np == opp_table->np) {
+			_get_opp_table_kref(opp_table);
+			return opp_table;
+		}
+	}
+
+err:
+	return ERR_PTR(-ENODEV);
+}
+
+/* Free resources previously acquired by _opp_table_alloc_required_tables() */
+static void _opp_table_free_required_tables(struct opp_table *opp_table)
+{
+	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+	struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
+	int i;
+
+	if (!required_opp_tables)
+		return;
+
+	for (i = 0; i < opp_table->required_opp_count; i++) {
+		if (IS_ERR_OR_NULL(required_opp_tables[i]))
+			break;
+
+		dev_pm_opp_put_opp_table(required_opp_tables[i]);
+	}
+
+	kfree(required_opp_tables);
+	kfree(genpd_virt_devs);
+
+	opp_table->required_opp_count = 0;
+	opp_table->genpd_virt_devs = NULL;
+	opp_table->required_opp_tables = NULL;
+}
+
+/*
+ * Populate all devices and opp tables which are part of "required-opps" list.
+ * Checking only the first OPP node should be enough.
+ */
+static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
+					     struct device *dev,
+					     struct device_node *opp_np)
+{
+	struct opp_table **required_opp_tables;
+	struct device **genpd_virt_devs = NULL;
+	struct device_node *required_np, *np;
+	int count, i;
+
+	/* Traversing the first OPP node is all we need */
+	np = of_get_next_available_child(opp_np, NULL);
+	if (!np) {
+		dev_err(dev, "Empty OPP table\n");
+		return;
+	}
+
+	count = of_count_phandle_with_args(np, "required-opps", NULL);
+	if (!count)
+		goto put_np;
+
+	if (count > 1) {
+		genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs),
+					  GFP_KERNEL);
+		if (!genpd_virt_devs)
+			goto put_np;
+	}
+
+	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
+				      GFP_KERNEL);
+	if (!required_opp_tables) {
+		kfree(genpd_virt_devs);
+		goto put_np;
+	}
+
+	opp_table->genpd_virt_devs = genpd_virt_devs;
+	opp_table->required_opp_tables = required_opp_tables;
+	opp_table->required_opp_count = count;
+
+	for (i = 0; i < count; i++) {
+		required_np = of_parse_required_opp(np, i);
+		if (!required_np)
+			goto free_required_tables;
+
+		required_opp_tables[i] = _find_table_of_opp_np(required_np);
+		of_node_put(required_np);
+
+		if (IS_ERR(required_opp_tables[i]))
+			goto free_required_tables;
+
+		/*
+		 * We only support genpd's OPPs in the "required-opps" for now,
+		 * as we don't know how much about other cases. Error out if the
+		 * required OPP doesn't belong to a genpd.
+		 */
+		if (!required_opp_tables[i]->is_genpd) {
+			dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
+				required_np);
+			goto free_required_tables;
+		}
+	}
+
+	goto put_np;
+
free_required_tables:
+	_opp_table_free_required_tables(opp_table);
put_np:
+	of_node_put(np);
+}
+
 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
 {
@@ -92,6 +253,9 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
 	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);
 
+	if (of_find_property(np, "#power-domain-cells", NULL))
+		opp_table->is_genpd = true;
+
 	/* Get OPP table node */
 	opp_np = _opp_of_get_opp_desc_node(np, index);
 	of_node_put(np);
@@ -106,9 +270,86 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
 
 	opp_table->np = opp_np;
 
+	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
 	of_node_put(opp_np);
 }
 
+void _of_clear_opp_table(struct opp_table *opp_table)
+{
+	_opp_table_free_required_tables(opp_table);
+}
+
+/*
+ * Release all resources previously acquired with a call to
+ * _of_opp_alloc_required_opps().
+ */
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+				struct dev_pm_opp *opp)
+{
+	struct dev_pm_opp **required_opps = opp->required_opps;
+	int i;
+
+	if (!required_opps)
+		return;
+
+	for (i = 0; i < opp_table->required_opp_count; i++) {
+		if (!required_opps[i])
+			break;
+
+		/* Put the reference back */
+		dev_pm_opp_put(required_opps[i]);
+	}
+
+	kfree(required_opps);
+	opp->required_opps = NULL;
+}
+
+/* Populate all required OPPs which are part of "required-opps" list */
+static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
+				       struct dev_pm_opp *opp)
+{
+	struct dev_pm_opp **required_opps;
+	struct opp_table *required_table;
+	struct device_node *np;
+	int i, ret, count = opp_table->required_opp_count;
+
+	if (!count)
+		return 0;
+
+	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
+	if (!required_opps)
+		return -ENOMEM;
+
+	opp->required_opps = required_opps;
+
+	for (i = 0; i < count; i++) {
+		required_table = opp_table->required_opp_tables[i];
+
+		np = of_parse_required_opp(opp->np, i);
+		if (unlikely(!np)) {
+			ret = -ENODEV;
+			goto free_required_opps;
+		}
+
+		required_opps[i] = _find_opp_of_np(required_table, np);
+		of_node_put(np);
+
+		if (!required_opps[i]) {
+			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+			       __func__, opp->np, i);
+			ret = -ENODEV;
+			goto free_required_opps;
+		}
+	}
+
+	return 0;
+
free_required_opps:
+	_of_opp_free_required_opps(opp_table, opp);
+
+	return ret;
+}
+
 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
 {
@@ -150,12 +391,10 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
 {
 	u32 *microvolt, *microamp = NULL;
-	int supplies, vcount, icount, ret, i, j;
+	int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
 	struct property *prop = NULL;
 	char name[NAME_MAX];
 
-	supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
-
 	/* Search for "opp-microvolt-<name>" */
 	if (opp_table->prop_name) {
 		snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -170,7 +409,13 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
 
 	/* Missing property isn't a problem, but an invalid entry is */
 	if (!prop) {
-		if (!opp_table->regulator_count)
+		if (unlikely(supplies == -1)) {
+			/* Initialize regulator_count */
+			opp_table->regulator_count = 0;
+			return 0;
+		}
+
+		if (!supplies)
 			return 0;
 
 		dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
@@ -179,6 +424,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
 		}
 	}
 
+	if (unlikely(supplies == -1)) {
+		/* Initialize regulator_count */
+		supplies = opp_table->regulator_count = 1;
+	} else if (unlikely(!supplies)) {
+		dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
+		return -EINVAL;
+	}
+
 	vcount = of_property_count_u32_elems(opp->np, name);
 	if (vcount < 0) {
 		dev_err(dev, "%s: Invalid %s property (%d)\n",
@@ -326,8 +579,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
 	ret = of_property_read_u64(np, "opp-hz", &rate);
 	if (ret < 0) {
 		/* "opp-hz" is optional for devices like power domains. */
-		if (!of_find_property(dev->of_node, "#power-domain-cells",
-				      NULL)) {
+		if (!opp_table->is_genpd) {
 			dev_err(dev, "%s: opp-hz not found\n", __func__);
 			goto free_opp;
 		}
@@ -354,21 +606,26 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
 	new_opp->dynamic = false;
 	new_opp->available = true;
 
+	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
+	if (ret)
+		goto free_opp;
+
 	if (!of_property_read_u32(np, "clock-latency-ns", &val))
 		new_opp->clock_latency_ns = val;
 
-	new_opp->pstate = of_genpd_opp_to_performance_state(dev, np);
-
 	ret = opp_parse_supplies(new_opp, dev, opp_table);
 	if (ret)
-		goto free_opp;
+		goto free_required_opps;
+
+	if (opp_table->is_genpd)
+		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
 
 	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
 	if (ret) {
 		/* Don't return error for duplicate OPPs */
 		if (ret == -EBUSY)
 			ret = 0;
-		goto free_opp;
+		goto free_required_opps;
 	}
 
 	/* OPP to select on device suspend */
@@ -398,6 +655,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
 
 	return new_opp;
 
+free_required_opps:
+	_of_opp_free_required_opps(opp_table, new_opp);
free_opp:
 	_opp_free(new_opp);
 
@@ -727,58 +986,48 @@ put_cpu_node:
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
 
 /**
- * of_dev_pm_opp_find_required_opp() - Search for required OPP.
- * @dev: The device whose OPP node is referenced by the 'np' DT node.
+ * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
 * @np: Node that contains the "required-opps" property.
+ * @index: Index of the phandle to parse.
 *
- * Returns the OPP of the device 'dev', whose phandle is present in the "np"
- * node. Although the "required-opps" property supports having multiple
- * phandles, this helper routine only parses the very first phandle in the list.
- *
- * Return: Matching opp, else returns ERR_PTR in case of error and should be
- * handled using IS_ERR.
+ * Returns the performance state of the OPP pointed out by the "required-opps"
+ * property at @index in @np.
 *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
+ * Return: Zero or positive performance state on success, otherwise negative
+ * value on errors.
 */
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
-						   struct device_node *np)
+int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
-	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+	struct dev_pm_opp *opp;
 	struct device_node *required_np;
 	struct opp_table *opp_table;
+	int pstate = -EINVAL;
 
-	opp_table = _find_opp_table(dev);
-	if (IS_ERR(opp_table))
-		return ERR_CAST(opp_table);
+	required_np = of_parse_required_opp(np, index);
+	if (!required_np)
+		return -EINVAL;
 
-	required_np = of_parse_phandle(np, "required-opps", 0);
-	if (unlikely(!required_np)) {
-		dev_err(dev, "Unable to parse required-opps\n");
-		goto put_opp_table;
+	opp_table = _find_table_of_opp_np(required_np);
+	if (IS_ERR(opp_table)) {
+		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
+		       __func__, np, PTR_ERR(opp_table));
+		goto put_required_np;
 	}
 
-	mutex_lock(&opp_table->lock);
-
-	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available && temp_opp->np == required_np) {
-			opp = temp_opp;
-
-			/* Increment the reference count of OPP */
-			dev_pm_opp_get(opp);
-			break;
-		}
+	opp = _find_opp_of_np(opp_table, required_np);
+	if (opp) {
+		pstate = opp->pstate;
+		dev_pm_opp_put(opp);
 	}
 
-	mutex_unlock(&opp_table->lock);
+	dev_pm_opp_put_opp_table(opp_table);
 
+put_required_np:
 	of_node_put(required_np);
 
-put_opp_table:
-	dev_pm_opp_put_opp_table(opp_table);
-
-	return opp;
+	return pstate;
 }
-EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
+EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
 
 /**
 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 9c6544b4f4f9..e24d81497375 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -63,6 +63,7 @@ extern struct list_head opp_tables;
 * @supplies:	Power supplies voltage/current values
 * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
 *		frequency from any other OPP's frequency.
+ * @required_opps: List of OPPs that are required by this OPP.
 * @opp_table:	points back to the opp_table struct this opp belongs to
 * @np:		OPP's device node.
 * @dentry:	debugfs dentry pointer (per opp)
@@ -84,6 +85,7 @@ struct dev_pm_opp {
 	unsigned long clock_latency_ns;
 
+	struct dev_pm_opp **required_opps;
 	struct opp_table *opp_table;
 
 	struct device_node *np;
@@ -133,13 +135,21 @@ enum opp_table_access {
 * @parsed_static_opps: True if OPPs are initialized from DT.
 * @shared_opp: OPP is shared between multiple devices.
 * @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
+ * @genpd_virt_devs: List of virtual devices for multiple genpd support.
+ * @required_opp_tables: List of device OPP tables that are required by OPPs in
+ *		this table.
+ * @required_opp_count: Number of required devices.
 * @supported_hw: Array of version number to support.
 * @supported_hw_count: Number of elements in supported_hw array.
 * @prop_name: A name to postfix to many DT properties, while parsing them.
 * @clk: Device's clock handle
 * @regulators: Supply regulators
- * @regulator_count: Number of power supply regulators
+ * @regulator_count: Number of power supply regulators. Its value can be -1
+ *		(uninitialized), 0 (no opp-microvolt property) or > 0 (has
+ *		opp-microvolt property).
 * @genpd_performance_state: Device's power domain support performance state.
+ * @is_genpd: Marks if the OPP table belongs to a genpd.
 * @set_opp: Platform specific set_opp callback
 * @set_opp_data: Data to be passed to set_opp callback
 * @dentry:	debugfs dentry pointer of the real device directory (not links).
@@ -171,13 +181,19 @@ struct opp_table {
 	enum opp_table_access shared_opp;
 	struct dev_pm_opp *suspend_opp;
 
+	struct mutex genpd_virt_dev_lock;
+	struct device **genpd_virt_devs;
+	struct opp_table **required_opp_tables;
+	unsigned int required_opp_count;
+
 	unsigned int *supported_hw;
 	unsigned int supported_hw_count;
 	const char *prop_name;
 	struct clk *clk;
 	struct regulator **regulators;
-	unsigned int regulator_count;
+	int regulator_count;
 	bool genpd_performance_state;
+	bool is_genpd;
 
 	int (*set_opp)(struct dev_pm_set_opp_data *data);
 	struct dev_pm_set_opp_data *set_opp_data;
@@ -206,10 +222,16 @@ void _put_opp_list_kref(struct opp_table *opp_table);
 
 #ifdef CONFIG_OF
 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
+void _of_clear_opp_table(struct opp_table *opp_table);
 struct opp_table *_managed_opp(struct device *dev, int index);
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+				struct dev_pm_opp *opp);
 #else
 static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
+static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
 static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
+static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
+					      struct dev_pm_opp *opp) {}
 #endif
 
 #ifdef CONFIG_DEBUG_FS
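[Usage note] The tri-state regulator_count documented above is why the core.c hunks replace "!count" style checks with explicit "> 0" comparisons and NULL tests on the regulators array. A stand-alone illustration of the convention, using a made-up struct rather than the private opp_table:

/* Mirrors the convention: -1 = not parsed yet, 0 = no supplies, >0 = count */
struct example_opp_table {
	int regulator_count;
};

/* _opp_allocate()-style sizing: always leave room for at least one supply */
static int example_supply_slots(const struct example_opp_table *t)
{
	return t->regulator_count > 0 ? t->regulator_count : 1;
}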
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 1360a7fa542c..c96c01e09740 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
 * OMAP SmartReflex Voltage Control
 *
@@ -11,10 +12,6 @@
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Lesly A M <x0080970@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
 */
 
 #include <linux/module.h>
@@ -37,7 +34,6 @@
 
 static LIST_HEAD(sr_list);
 static struct omap_sr_class_data *sr_class;
-static struct omap_sr_pmic_data *sr_pmic_data;
 static struct dentry		*sr_dbg_dir;
 
 static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
@@ -780,25 +776,6 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
 	sr_class->disable(sr, 1);
 }
 
-/**
- * omap_sr_register_pmic() - API to register pmic specific info.
- * @pmic_data:	The structure containing pmic specific data.
- *
- * This API is to be called from the PMIC specific code to register with
- * smartreflex driver pmic specific info. Currently the only info required
- * is the smartreflex init on the PMIC side.
- */
-void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data)
-{
-	if (!pmic_data) {
-		pr_warn("%s: Trying to register NULL PMIC data structure with smartreflex\n",
-			__func__);
-		return;
-	}
-
-	sr_pmic_data = pmic_data;
-}
-
 /* PM Debug FS entries to enable and disable smartreflex. */
 static int omap_sr_autocomp_show(void *data, u64 *val)
 {
@@ -1010,8 +987,7 @@ static int omap_sr_remove(struct platform_device *pdev)
 	if (sr_info->autocomp_active)
 		sr_stop_vddautocomp(sr_info);
-	if (sr_info->dbg_dir)
-		debugfs_remove_recursive(sr_info->dbg_dir);
+	debugfs_remove_recursive(sr_info->dbg_dir);
 
 	pm_runtime_disable(&pdev->dev);
 	list_del(&sr_info->node);
@@ -1065,17 +1041,6 @@ static int __init sr_init(void)
 {
 	int ret = 0;
 
-	/*
-	 * sr_init is a late init. If by then a pmic specific API is not
-	 * registered either there is no need for anything to be done on
-	 * the PMIC side or somebody has forgotten to register a PMIC
-	 * handler. Warn for the second condition.
-	 */
-	if (sr_pmic_data && sr_pmic_data->sr_pmic_init)
-		sr_pmic_data->sr_pmic_init();
-	else
-		pr_warn("%s: No PMIC hook to init smartreflex\n", __func__);
-
 	ret = platform_driver_register(&smartreflex_driver);
 	if (ret) {
 		pr_err("%s: platform driver register failed for SR\n",