Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--	drivers/cpuidle/Kconfig.arm		|  1
-rw-r--r--	drivers/cpuidle/Kconfig.arm64		|  1
-rw-r--r--	drivers/cpuidle/cpuidle-arm64.c		|  1
-rw-r--r--	drivers/cpuidle/cpuidle-exynos.c	| 76
-rw-r--r--	drivers/cpuidle/cpuidle.c		| 94
5 files changed, 140 insertions, 33 deletions
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 8c16ab20fb15..8e07c9419153 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -55,6 +55,7 @@ config ARM_AT91_CPUIDLE
config ARM_EXYNOS_CPUIDLE
bool "Cpu Idle Driver for the Exynos processors"
depends on ARCH_EXYNOS
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
help
Select this to enable cpuidle for Exynos processors
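
ARCH_NEEDS_CPU_IDLE_COUPLED is the hidden helper symbol that pulls the coupled-state core (drivers/cpuidle/coupled.c) into the build; selecting it here, only when SMP is set, is what makes CPUIDLE_FLAG_COUPLED states usable on Exynos. From memory, the relevant bits in drivers/cpuidle/Kconfig and Makefile look roughly like this:

config ARCH_NEEDS_CPU_IDLE_COUPLED
	def_bool n

obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED)	+= coupled.o
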
diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64
index d0a08ed1b2ee..6effb3656735 100644
--- a/drivers/cpuidle/Kconfig.arm64
+++ b/drivers/cpuidle/Kconfig.arm64
@@ -4,7 +4,6 @@
config ARM64_CPUIDLE
bool "Generic ARM64 CPU idle Driver"
- select ARM64_CPU_SUSPEND
select DT_IDLE_STATES
help
Select this to enable generic cpuidle driver for ARM64.
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c
index 80704b931ba4..39a2c62716c3 100644
--- a/drivers/cpuidle/cpuidle-arm64.c
+++ b/drivers/cpuidle/cpuidle-arm64.c
@@ -19,7 +19,6 @@
#include <linux/of.h>
#include <asm/cpuidle.h>
-#include <asm/suspend.h>
#include "dt_idle_states.h"
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 4003a3160865..26f5f29fdb03 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -1,8 +1,11 @@
-/* linux/arch/arm/mach-exynos/cpuidle.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
+ * Coupled cpuidle support based on the work of:
+ * Colin Cross <ccross@android.com>
+ * Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -13,13 +16,49 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/platform_data/cpuidle-exynos.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
+static atomic_t exynos_idle_barrier;
+
+static struct cpuidle_exynos_data *exynos_cpuidle_pdata;
static void (*exynos_enter_aftr)(void);
+static int exynos_enter_coupled_lowpower(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ int ret;
+
+ exynos_cpuidle_pdata->pre_enter_aftr();
+
+ /*
+ * Waiting all cpus to reach this point at the same moment
+ */
+ cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier);
+
+ /*
+ * Both cpus will reach this point at the same time
+ */
+ ret = dev->cpu ? exynos_cpuidle_pdata->cpu1_powerdown()
+ : exynos_cpuidle_pdata->cpu0_enter_aftr();
+ if (ret)
+ index = ret;
+
+ /*
+ * Waiting all cpus to finish the power sequence before going further
+ */
+ cpuidle_coupled_parallel_barrier(dev, &exynos_idle_barrier);
+
+ exynos_cpuidle_pdata->post_enter_aftr();
+
+ return index;
+}
+
static int exynos_enter_lowpower(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -55,13 +94,40 @@ static struct cpuidle_driver exynos_idle_driver = {
.safe_state_index = 0,
};
+static struct cpuidle_driver exynos_coupled_idle_driver = {
+ .name = "exynos_coupled_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ [0] = ARM_CPUIDLE_WFI_STATE,
+ [1] = {
+ .enter = exynos_enter_coupled_lowpower,
+ .exit_latency = 5000,
+ .target_residency = 10000,
+ .flags = CPUIDLE_FLAG_COUPLED |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+ },
+ .state_count = 2,
+ .safe_state_index = 0,
+};
+
static int exynos_cpuidle_probe(struct platform_device *pdev)
{
int ret;
- exynos_enter_aftr = (void *)(pdev->dev.platform_data);
+ if (of_machine_is_compatible("samsung,exynos4210")) {
+ exynos_cpuidle_pdata = pdev->dev.platform_data;
+
+ ret = cpuidle_register(&exynos_coupled_idle_driver,
+ cpu_possible_mask);
+ } else {
+ exynos_enter_aftr = (void *)(pdev->dev.platform_data);
+
+ ret = cpuidle_register(&exynos_idle_driver, NULL);
+ }
- ret = cpuidle_register(&exynos_idle_driver, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register cpuidle driver\n");
return ret;
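
The probe now distinguishes two cases: on boards matching "samsung,exynos4210" it expects a struct cpuidle_exynos_data (from <linux/platform_data/cpuidle-exynos.h>) carrying the four AFTR callbacks used by exynos_enter_coupled_lowpower() above, while all other Exynos boards keep the old single-callback behaviour. A minimal sketch of the platform side follows; the callback bodies are hypothetical placeholders, the real ones live under arch/arm/mach-exynos/:

#include <linux/platform_device.h>
#include <linux/platform_data/cpuidle-exynos.h>

/* Hypothetical callbacks standing in for the mach-exynos implementations. */
static int hypothetical_cpu0_enter_aftr(void)	{ return 0; }	/* enter AFTR from CPU0 */
static int hypothetical_cpu1_powerdown(void)	{ return 0; }	/* park/power down CPU1 */
static void hypothetical_pre_enter_aftr(void)	{ }		/* SoC-side preparation */
static void hypothetical_post_enter_aftr(void)	{ }		/* undo the preparation */

static struct cpuidle_exynos_data exynos_coupled_cpuidle_data = {
	.cpu0_enter_aftr	= hypothetical_cpu0_enter_aftr,
	.cpu1_powerdown		= hypothetical_cpu1_powerdown,
	.pre_enter_aftr		= hypothetical_pre_enter_aftr,
	.post_enter_aftr	= hypothetical_post_enter_aftr,
};

static struct platform_device exynos_cpuidle = {
	.name			= "exynos_cpuidle",
	.id			= -1,
	.dev.platform_data	= &exynos_coupled_cpuidle_data,
};

Registering that device (platform_device_register(&exynos_cpuidle)) is what hands the callbacks to exynos_cpuidle_probe().
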
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 125150dc6e81..4d534582514e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -19,6 +19,8 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
#include <trace/events/power.h>
#include "cpuidle.h"
@@ -32,7 +34,6 @@ LIST_HEAD(cpuidle_detected_devices);
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;
-static bool use_deepest_state __read_mostly;
int cpuidle_disabled(void)
{
@@ -66,36 +67,23 @@ int cpuidle_play_dead(void)
}
/**
- * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode.
- * @enable: Whether enable or disable the feature.
- *
- * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and
- * always use the state with the greatest exit latency (out of the states that
- * are not disabled).
- *
- * This function can only be called after cpuidle_pause() to avoid races.
- */
-void cpuidle_use_deepest_state(bool enable)
-{
- use_deepest_state = enable;
-}
-
-/**
- * cpuidle_find_deepest_state - Find the state of the greatest exit latency.
- * @drv: cpuidle driver for a given CPU.
- * @dev: cpuidle device for a given CPU.
+ * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ * @freeze: Whether or not the state should be suitable for suspend-to-idle.
*/
static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev, bool freeze)
{
unsigned int latency_req = 0;
- int i, ret = CPUIDLE_DRIVER_STATE_START - 1;
+ int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable || s->exit_latency <= latency_req)
+ if (s->disabled || su->disable || s->exit_latency <= latency_req
+ || (freeze && !s->enter_freeze))
continue;
latency_req = s->exit_latency;
@@ -104,6 +92,63 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
+static void enter_freeze_proper(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
+{
+ tick_freeze();
+ /*
+ * The state used here cannot be a "coupled" one, because the "coupled"
+ * cpuidle mechanism enables interrupts and doing that with timekeeping
+ * suspended is generally unsafe.
+ */
+ drv->states[index].enter_freeze(dev, drv, index);
+ WARN_ON(!irqs_disabled());
+ /*
+ * timekeeping_resume() that will be called by tick_unfreeze() for the
+ * last CPU executing it calls functions containing RCU read-side
+ * critical sections, so tell RCU about that.
+ */
+ RCU_NONIDLE(tick_unfreeze());
+}
+
+/**
+ * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ *
+ * If there are states with the ->enter_freeze callback, find the deepest of
+ * them and enter it with frozen tick. Otherwise, find the deepest state
+ * available and enter it normally.
+ */
+void cpuidle_enter_freeze(void)
+{
+ struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int index;
+
+ /*
+ * Find the deepest state with ->enter_freeze present, which guarantees
+ * that interrupts won't be enabled when it exits and allows the tick to
+ * be frozen safely.
+ */
+ index = cpuidle_find_deepest_state(drv, dev, true);
+ if (index >= 0) {
+ enter_freeze_proper(drv, dev, index);
+ return;
+ }
+
+ /*
+ * It is not safe to freeze the tick, find the deepest state available
+ * at all and try to enter it normally.
+ */
+ index = cpuidle_find_deepest_state(drv, dev, false);
+ if (index >= 0)
+ cpuidle_enter(drv, dev, index);
+ else
+ arch_cpu_idle();
+
+ /* Interrupts are enabled again here. */
+ local_irq_disable();
+}
+
/**
* cpuidle_enter_state - enter the state and update stats
* @dev: cpuidle device for this cpu
@@ -166,9 +211,6 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (!drv || !dev || !dev->enabled)
return -EBUSY;
- if (unlikely(use_deepest_state))
- return cpuidle_find_deepest_state(drv, dev);
-
return cpuidle_curr_governor->select(drv, dev);
}
@@ -200,7 +242,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
- if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state))
+ if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev, index);
}
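
For the new suspend-to-idle path, a state opts in by providing an ->enter_freeze callback next to its normal ->enter; cpuidle_enter_freeze() only picks states that have one, because the callback must return with interrupts still disabled and must not rely on the tick, which tick_freeze() has already stopped. A hedged sketch of what such a pair can look like in an ARM-style driver, not taken from this patch; cpu_do_idle() is assumed as the WFI helper:

static int hypothetical_enter(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* plain WFI entry for the normal governor path */
	return index;
}

static void hypothetical_enter_freeze(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
{
	/*
	 * Suspend-to-idle entry: must not enable interrupts and must not
	 * program the tick, since timekeeping is suspended around this call.
	 */
	cpu_do_idle();
}

The driver then sets both .enter and .enter_freeze in the corresponding cpuidle_state; states without .enter_freeze are skipped by the freeze path and only reachable through the normal governor selection.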