 Documentation/power/runtime_pm.txt | 20 +++++++++++----------
 MAINTAINERS                        |  1 -
 drivers/base/power/opp.c           | 31 ++++++++++++++++++++++++++-----
 drivers/pnp/resource.c             |  4 ++--
 drivers/powercap/intel_rapl.c      | 33 ++++++++++++---------------------
 kernel/power/Kconfig               |  3 +--
 6 files changed, 52 insertions(+), 40 deletions(-)
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index e1bee8a4aaac..f32ce5419573 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -665,15 +665,17 @@ The PM core does its best to reduce the probability of race conditions between
the runtime PM and system suspend/resume (and hibernation) callbacks by carrying
out the following operations:
- * During system suspend it calls pm_runtime_get_noresume() and
- pm_runtime_barrier() for every device right before executing the
- subsystem-level .suspend() callback for it. In addition to that it calls
- __pm_runtime_disable() with 'false' as the second argument for every device
- right before executing the subsystem-level .suspend_late() callback for it.
-
- * During system resume it calls pm_runtime_enable() and pm_runtime_put()
- for every device right after executing the subsystem-level .resume_early()
- callback and right after executing the subsystem-level .resume() callback
+ * During system suspend pm_runtime_get_noresume() is called for every device
+ right before executing the subsystem-level .prepare() callback for it and
+ pm_runtime_barrier() is called for every device right before executing the
+ subsystem-level .suspend() callback for it. In addition to that the PM core
+ calls __pm_runtime_disable() with 'false' as the second argument for every
+ device right before executing the subsystem-level .suspend_late() callback
+ for it.
+
+ * During system resume pm_runtime_enable() and pm_runtime_put() are called for
+ every device right after executing the subsystem-level .resume_early()
+ callback and right after executing the subsystem-level .complete() callback
for it, respectively.
7. Generic subsystem callbacks
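The reordered sequence in the updated text reads most easily as a timeline. A compressed sketch of the ordering it describes (the subsys_*() names are hypothetical stand-ins for the subsystem-level callbacks; the pm_runtime_*() calls are the real APIs):

    /* Suspend side: */
    pm_runtime_get_noresume(dev);       /* before .prepare() */
    subsys_prepare(dev);
    pm_runtime_barrier(dev);            /* before .suspend() */
    subsys_suspend(dev);
    __pm_runtime_disable(dev, false);   /* before .suspend_late() */
    subsys_suspend_late(dev);

    /* Resume side, mirrored: */
    subsys_resume_early(dev);
    pm_runtime_enable(dev);             /* right after .resume_early() */
    subsys_resume(dev);
    subsys_complete(dev);
    pm_runtime_put(dev);                /* right after .complete() */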
diff --git a/MAINTAINERS b/MAINTAINERS
index 8ccf31c95a15..4ba0253dd593 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6938,7 +6938,6 @@ F: drivers/power/
PNP SUPPORT
M: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-M: Bjorn Helgaas <bhelgaas@google.com>
S: Maintained
F: drivers/pnp/
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 25538675d59e..39412c15db70 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -394,6 +394,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0: On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST: Freq are same and volt are different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM: Memory allocation failure
*/
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
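Read from the caller's side, the new Return: block maps onto error handling like this (a hypothetical caller; the 1 GHz / 1.1 V values are invented):

    int ret = dev_pm_opp_add(dev, 1000000000, 1100000);

    if (ret == -EEXIST)
        dev_err(dev, "conflicting OPP already registered\n");
    else if (ret == -ENOMEM)
        dev_err(dev, "no memory for OPP table entry\n");
    /* ret == 0 covers both a fresh entry and an exact,
     * still-available duplicate */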
@@ -443,15 +450,31 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
new_opp->u_volt = u_volt;
new_opp->available = true;
- /* Insert new OPP in order of increasing frequency */
+ /*
+ * Insert new OPP in order of increasing frequency
+ * and discard if already present
+ */
head = &dev_opp->opp_list;
list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
- if (new_opp->rate < opp->rate)
+ if (new_opp->rate <= opp->rate)
break;
else
head = &opp->node;
}
+ /* Duplicate OPPs ? */
+ if (new_opp->rate == opp->rate) {
+ int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+ 0 : -EEXIST;
+
+ dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+ __func__, opp->rate, opp->u_volt, opp->available,
+ new_opp->rate, new_opp->u_volt, new_opp->available);
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ return ret;
+ }
+
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
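The insertion logic above is the classic "sorted insert, reject duplicates" pattern. A standalone sketch of the same pattern in plain C (no kernel list or RCU machinery; all names invented):

    #include <stdlib.h>

    struct opp {
        unsigned long rate;
        struct opp *next;
    };

    /* Returns 0 on insert, -1 for a duplicate rate or allocation failure. */
    static int opp_insert(struct opp **head, unsigned long rate)
    {
        struct opp **pos = head;
        struct opp *new;

        /* Walk to the first entry with rate >= the new rate. */
        while (*pos && (*pos)->rate < rate)
            pos = &(*pos)->next;

        if (*pos && (*pos)->rate == rate)
            return -1;          /* duplicate: discard, as in the patch */

        new = malloc(sizeof(*new));
        if (!new)
            return -1;
        new->rate = rate;
        new->next = *pos;
        *pos = new;
        return 0;
    }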
@@ -734,11 +757,9 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (dev_pm_opp_add(dev, freq, volt)) {
+ if (dev_pm_opp_add(dev, freq, volt))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
- continue;
- }
nr -= 2;
}
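For context on the loop being tweaked: val walks an array of big-endian u32 cells taken from the device tree in (kHz, uV) pairs, and the * 1000 converts kHz to Hz. A minimal sketch with invented values (792 MHz at 1.10 V):

    __be32 cells[] = { cpu_to_be32(792000), cpu_to_be32(1100000) };
    const __be32 *val = cells;

    unsigned long freq = be32_to_cpup(val++) * 1000; /* 792000000 Hz */
    unsigned long volt = be32_to_cpup(val++);        /* 1100000 uV */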
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 01712cbfd92e..782e82289571 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
return 1;
/* check if the resource is valid */
- if (*irq < 0 || *irq > 15)
+ if (*irq > 15)
return 0;
/* check if the resource is reserved */
@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
return 1;
/* check if the resource is valid */
- if (*dma < 0 || *dma == 4 || *dma > 7)
+ if (*dma == 4 || *dma > 7)
return 0;
/* check if the resource is reserved */
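Both removed tests were dead code: the values come from resource->start, and resource_size_t is unsigned, so a "< 0" comparison can never be true (GCC flags this with -Wtype-limits). A standalone illustration, assuming an unsigned type as in the kernel:

    #include <stdio.h>

    int main(void)
    {
        unsigned long irq = (unsigned long)-1;  /* bogus wrapped value */

        if (irq < 0)            /* always false for unsigned types */
            puts("never reached");
        if (irq > 15)           /* the remaining range check catches it */
            puts("invalid ISA IRQ");
        return 0;
    }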
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index d9a0770b6c73..b1cda6ffdbcc 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -951,7 +951,9 @@ static const struct x86_cpu_id rapl_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */
{ X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
{ X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
- { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */
+ { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */
+ { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */
+ { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */
/* TODO: Add more CPU IDs after testing */
{}
};
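A table like rapl_ids is consumed at init time via x86_match_cpu() from <asm/cpu_device_id.h>. A sketch of the probe-time check (the real intel_rapl init does more than this):

    static int __init rapl_init(void)
    {
        if (!x86_match_cpu(rapl_ids))
            return -ENODEV;     /* CPU model not in the table */
        /* ... proceed with RAPL package/domain detection ... */
        return 0;
    }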
@@ -1124,8 +1126,7 @@ err_cleanup_package:
static int rapl_check_domain(int cpu, int domain)
{
unsigned msr;
- u64 val1, val2 = 0;
- int retry = 0;
+ u64 val = 0;
switch (domain) {
case RAPL_DOMAIN_PACKAGE:
@@ -1144,26 +1145,13 @@ static int rapl_check_domain(int cpu, int domain)
pr_err("invalid domain id %d\n", domain);
return -EINVAL;
}
- if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
- return -ENODEV;
-
- /* PP1/uncore/graphics domain may not be active at the time of
- * driver loading. So skip further checks.
+ /* make sure domain counters are available and contain non-zero
+ * values, otherwise skip it.
*/
- if (domain == RAPL_DOMAIN_PP1)
- return 0;
- /* energy counters roll slowly on some domains */
- while (++retry < 10) {
- usleep_range(10000, 15000);
- rdmsrl_safe_on_cpu(cpu, msr, &val2);
- if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
- return 0;
- }
- /* if energy counter does not change, report as bad domain */
- pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
- rapl_domain_names[domain], val1, val2);
+ if (rdmsrl_safe_on_cpu(cpu, msr, &val) || !val)
+ return -ENODEV;
- return -ENODEV;
+ return 0;
}
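The rewrite trades robustness for probe speed: the old code slept 10-15 ms per retry, up to ten times per domain, waiting for the energy counter to move, while the new code does a single read and treats a faulting read or an all-zero counter as "domain absent". A userspace analogue of the single-read check via the msr driver (a sketch; requires CONFIG_X86_MSR and root, and uses the package-domain MSR as the example):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_PKG_ENERGY_STATUS 0x611

    int main(void)
    {
        uint64_t val = 0;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
            return 1;
        /* A failed read (MSR not implemented) or a zero counter
         * both mean "skip the domain", as in the patch. */
        if (pread(fd, &val, sizeof(val), MSR_PKG_ENERGY_STATUS) != sizeof(val)
            || !val)
            printf("domain unavailable\n");
        else
            printf("energy counter: %llu\n", (unsigned long long)val);
        close(fd);
        return 0;
    }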
/* Detect active and valid domains for the given CPU, caller must
@@ -1180,6 +1168,9 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
/* use physical package id to read counters */
if (!rapl_check_domain(cpu, i))
rp->domain_map |= 1 << i;
+ else
+ pr_warn("RAPL domain %s detection failed\n",
+ rapl_domain_names[i]);
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 2fac9cc79b3d..9a83d780facd 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -257,8 +257,7 @@ config ARCH_HAS_OPP
bool
config PM_OPP
- bool "Operating Performance Point (OPP) Layer library"
- depends on ARCH_HAS_OPP
+ bool
---help---
SOCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. This