| author | Linus Torvalds <torvalds@linux-foundation.org>, 2011-11-07 10:13:52 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>, 2011-11-07 10:13:52 -0800 |
| commit | 3c00303206c3a1ccd86579efdc90bc35f140962e |
| tree | 66170c84b5ddaeb102aea3530517a26657b6ea29 /drivers |
| parent | 83dbb15e9cd78a3619e3db36777e2f81d09b2914 |
| parent | efb90582c575084723cc14302c1300cb26c7e01f |
| download | linux-3c00303206c3a1ccd86579efdc90bc35f140962e.tar.bz2 |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux:
cpuidle: Single/Global registration of idle states
cpuidle: Split cpuidle_state structure and move per-cpu statistics fields
cpuidle: Remove CPUIDLE_FLAG_IGNORE and dev->prepare()
cpuidle: Move dev->last_residency update to driver enter routine; remove dev->last_state
ACPI: Fix CONFIG_ACPI_DOCK=n compiler warning
ACPI: Export FADT pm_profile integer value to userspace
thermal: Prevent polling from happening during system suspend
ACPI: Drop ACPI_NO_HARDWARE_INIT
ACPI atomicio: Convert width in bits to bytes in __acpi_ioremap_fast()
PNPACPI: Simplify disabled resource registration
ACPI: Fix possible recursive locking in hwregs.c
ACPI: use kstrdup()
mrst pmu: update comment
tools/power turbostat: less verbose debugging
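
The bulk of this merge is the cpuidle rework named at the top of the shortlog: state definitions move into the globally registered struct cpuidle_driver, per-CPU statistics move into struct cpuidle_state_usage, and the ->enter() callback now receives the driver plus a state index, updates dev->last_residency itself, and returns the index of the state it actually entered. A minimal sketch of a driver enter routine under the new prototype — my_idle_enter() and my_enter_hw() are illustrative stand-ins, not symbols from this series:

```c
#include <linux/cpuidle.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical hardware entry point (think MWAIT/HLT); not from the patch. */
static void my_enter_hw(struct cpuidle_state *state)
{
	/* issue the low-power instruction for this state here */
}

/* Enter callback under the reworked API: takes the driver and a state
 * index, reports residency through dev->last_residency, and returns the
 * index of the state actually entered (or a negative errno on failure).
 */
static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	ktime_t before, after;

	before = ktime_get();
	my_enter_hw(&drv->states[index]);
	after = ktime_get();

	/* dev->last_state is gone; the driver reports residency itself. */
	dev->last_residency = (int)ktime_to_us(ktime_sub(after, before));

	return index;
}
```

Negative return values now signal failure (the old callbacks returned a residency of 0), which is why the ACPI paths in the diff below switch their early exits to -EINVAL.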
Diffstat (limited to 'drivers')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/acpi/acpica/hwregs.c | 11 |
| -rw-r--r-- | drivers/acpi/atomicio.c | 2 |
| -rw-r--r-- | drivers/acpi/bus.c | 8 |
| -rw-r--r-- | drivers/acpi/processor_driver.c | 20 |
| -rw-r--r-- | drivers/acpi/processor_idle.c | 251 |
| -rw-r--r-- | drivers/acpi/scan.c | 3 |
| -rw-r--r-- | drivers/acpi/sysfs.c | 14 |
| -rw-r--r-- | drivers/cpuidle/cpuidle.c | 86 |
| -rw-r--r-- | drivers/cpuidle/driver.c | 25 |
| -rw-r--r-- | drivers/cpuidle/governors/ladder.c | 41 |
| -rw-r--r-- | drivers/cpuidle/governors/menu.c | 29 |
| -rw-r--r-- | drivers/cpuidle/sysfs.c | 22 |
| -rw-r--r-- | drivers/idle/intel_idle.c | 130 |
| -rw-r--r-- | drivers/pnp/pnpacpi/rsparser.c | 62 |
| -rw-r--r-- | drivers/thermal/thermal_sys.c | 4 |
15 files changed, 464 insertions, 244 deletions
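
Most of the churn in these files is the single/global registration conversion: acpi_idle and intel_idle now fill in one global cpuidle_driver (state table included), register it once, and then register a lightweight per-CPU cpuidle_device that carries only states_usage[]. A condensed sketch of the resulting registration order, with placeholder names (my_driver, my_devices, my_fill_states) and most error unwinding trimmed:

```c
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <linux/percpu.h>

static struct cpuidle_driver my_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
};

static struct cpuidle_device __percpu *my_devices;

/* Hypothetical helper: populate my_driver.states[] and state_count once for
 * the whole system, which is the role acpi_processor_setup_cpuidle_states()
 * and intel_idle_cpuidle_driver_init() take on in this series.
 */
static void my_fill_states(struct cpuidle_driver *drv)
{
}

static int __init my_idle_init(void)
{
	int cpu, ret;

	my_fill_states(&my_driver);

	/* One global driver registration carrying the state definitions... */
	ret = cpuidle_register_driver(&my_driver);
	if (ret)
		return ret;

	my_devices = alloc_percpu(struct cpuidle_device);
	if (!my_devices) {
		cpuidle_unregister_driver(&my_driver);
		return -ENOMEM;
	}

	/* ...then one per-CPU device holding only the usage counters. */
	for_each_online_cpu(cpu) {
		struct cpuidle_device *dev = per_cpu_ptr(my_devices, cpu);

		dev->cpu = cpu;
		ret = cpuidle_register_device(dev);
		if (ret)
			return ret;	/* unregistration of earlier CPUs trimmed */
	}
	return 0;
}
```

The ordering matters: as the comment added in acpi_processor_power_init() below notes, the driver must already be registered before any cpuidle_register_device() call.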
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 55accb7018bb..cc70f3fdcdd1 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -269,16 +269,17 @@ acpi_status acpi_hw_clear_acpi_status(void) status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, ACPI_BITMASK_ALL_FIXED_STATUS); - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } + + acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + + if (ACPI_FAILURE(status)) + goto exit; /* Clear the GPE Bits in all GPE registers in all GPE blocks */ status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); - unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); +exit: return_ACPI_STATUS(status); } diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c index 04ae1c88c03c..cfc0cc10af39 100644 --- a/drivers/acpi/atomicio.c +++ b/drivers/acpi/atomicio.c @@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, { struct acpi_iomap *map; - map = __acpi_find_iomap(paddr, size); + map = __acpi_find_iomap(paddr, size/8); if (map) return map->vaddr + (paddr - map->paddr); else diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 437ddbf0c49a..9ecec98bc76e 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -911,10 +911,7 @@ void __init acpi_early_init(void) } #endif - status = - acpi_enable_subsystem(~ - (ACPI_NO_HARDWARE_INIT | - ACPI_NO_ACPI_ENABLE)); + status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); goto error0; @@ -935,8 +932,7 @@ static int __init acpi_bus_init(void) acpi_os_initialize1(); - status = - acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE); + status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to start the ACPI Interpreter\n"); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index a4e0f1ba6040..9d7bc9f6b6cc 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, if (action == CPU_ONLINE && pr) { acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_cst_has_changed(pr); + acpi_processor_hotplug(pr); acpi_processor_reevaluate_tstate(pr, action); acpi_processor_tstate_has_changed(pr); } @@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) acpi_processor_get_throttling_info(pr); acpi_processor_get_limit_info(pr); - - if (cpuidle_get_driver() == &acpi_idle_driver) + if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr, device); pr->cdev = thermal_cooling_device_register("Processor", device, @@ -800,17 +799,9 @@ static int __init acpi_processor_init(void) memset(&errata, 0, sizeof(errata)); - if (!cpuidle_register_driver(&acpi_idle_driver)) { - printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", - acpi_idle_driver.name); - } else { - printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", - cpuidle_get_driver()->name); - } - result = acpi_bus_register_driver(&acpi_processor_driver); if (result < 0) - goto out_cpuidle; + return result; acpi_processor_install_hotplug_notify(); @@ -821,11 +812,6 @@ static int __init acpi_processor_init(void) acpi_processor_throttling_init(); return 0; - -out_cpuidle: - cpuidle_unregister_driver(&acpi_idle_driver); - - return result; } static void __exit acpi_processor_exit(void) diff --git 
a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 9b88f9828d8c..73b2909dddfe 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -741,22 +741,25 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) /** * acpi_idle_enter_c1 - enters an ACPI C1 state-type * @dev: the target CPU - * @state: the state data + * @drv: cpuidle driver containing cpuidle state info + * @index: index of target state * * This is equivalent to the HALT instruction. */ static int acpi_idle_enter_c1(struct cpuidle_device *dev, - struct cpuidle_state *state) + struct cpuidle_driver *drv, int index) { ktime_t kt1, kt2; s64 idle_time; struct acpi_processor *pr; - struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); pr = __this_cpu_read(processors); + dev->last_residency = 0; if (unlikely(!pr)) - return 0; + return -EINVAL; local_irq_disable(); @@ -764,7 +767,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, if (acpi_idle_suspend) { local_irq_enable(); cpu_relax(); - return 0; + return -EINVAL; } lapic_timer_state_broadcast(pr, cx, 1); @@ -773,37 +776,47 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, kt2 = ktime_get_real(); idle_time = ktime_to_us(ktime_sub(kt2, kt1)); + /* Update device last_residency*/ + dev->last_residency = (int)idle_time; + local_irq_enable(); cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); - return idle_time; + return index; } /** * acpi_idle_enter_simple - enters an ACPI state without BM handling * @dev: the target CPU - * @state: the state data + * @drv: cpuidle driver with cpuidle state information + * @index: the index of suggested state */ static int acpi_idle_enter_simple(struct cpuidle_device *dev, - struct cpuidle_state *state) + struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); + dev->last_residency = 0; if (unlikely(!pr)) - return 0; - - if (acpi_idle_suspend) - return(acpi_idle_enter_c1(dev, state)); + return -EINVAL; local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EINVAL; + } + + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -815,7 +828,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); - return 0; + return -EINVAL; } } @@ -837,6 +850,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); + /* Update device last_residency*/ + dev->last_residency = (int)idle_time; + /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); @@ -848,7 +864,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; - return idle_time; + return index; } static int c3_cpu_count; @@ -857,37 +873,43 @@ static DEFINE_RAW_SPINLOCK(c3_lock); /** * acpi_idle_enter_bm - enters C3 with proper BM handling * @dev: the target CPU - * @state: the state data + * @drv: cpuidle driver containing state data + 
* @index: the index of suggested state * * If BM is detected, the deepest non-C3 idle state is entered instead. */ static int acpi_idle_enter_bm(struct cpuidle_device *dev, - struct cpuidle_state *state) + struct cpuidle_driver *drv, int index) { struct acpi_processor *pr; - struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); ktime_t kt1, kt2; s64 idle_time_ns; s64 idle_time; pr = __this_cpu_read(processors); + dev->last_residency = 0; if (unlikely(!pr)) - return 0; + return -EINVAL; + - if (acpi_idle_suspend) - return(acpi_idle_enter_c1(dev, state)); + if (acpi_idle_suspend) { + cpu_relax(); + return -EINVAL; + } if (!cx->bm_sts_skip && acpi_idle_bm_check()) { - if (dev->safe_state) { - dev->last_state = dev->safe_state; - return dev->safe_state->enter(dev, dev->safe_state); + if (drv->safe_state_index >= 0) { + return drv->states[drv->safe_state_index].enter(dev, + drv, drv->safe_state_index); } else { local_irq_disable(); acpi_safe_halt(); local_irq_enable(); - return 0; + return -EINVAL; } } @@ -904,7 +926,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); - return 0; + return -EINVAL; } } @@ -954,6 +976,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, idle_time = idle_time_ns; do_div(idle_time, NSEC_PER_USEC); + /* Update device last_residency*/ + dev->last_residency = (int)idle_time; + /* Tell the scheduler how much we idled: */ sched_clock_idle_wakeup_event(idle_time_ns); @@ -965,7 +990,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, lapic_timer_state_broadcast(pr, cx, 0); cx->time += idle_time; - return idle_time; + return index; } struct cpuidle_driver acpi_idle_driver = { @@ -974,14 +999,16 @@ struct cpuidle_driver acpi_idle_driver = { }; /** - * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE + * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE + * device i.e. per-cpu data + * * @pr: the ACPI processor */ -static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) +static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) { int i, count = CPUIDLE_DRIVER_STATE_START; struct acpi_processor_cx *cx; - struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; struct cpuidle_device *dev = &pr->power.dev; if (!pr->flags.power_setup_done) @@ -992,9 +1019,62 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) } dev->cpu = pr->id; + + if (max_cstate == 0) + max_cstate = 1; + + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { + cx = &pr->power.states[i]; + state_usage = &dev->states_usage[count]; + + if (!cx->valid) + continue; + +#ifdef CONFIG_HOTPLUG_CPU + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) + continue; +#endif + + cpuidle_set_statedata(state_usage, cx); + + count++; + if (count == CPUIDLE_STATE_MAX) + break; + } + + dev->state_count = count; + + if (!count) + return -EINVAL; + + return 0; +} + +/** + * acpi_processor_setup_cpuidle states- prepares and configures cpuidle + * global state data i.e. 
idle routines + * + * @pr: the ACPI processor + */ +static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) +{ + int i, count = CPUIDLE_DRIVER_STATE_START; + struct acpi_processor_cx *cx; + struct cpuidle_state *state; + struct cpuidle_driver *drv = &acpi_idle_driver; + + if (!pr->flags.power_setup_done) + return -EINVAL; + + if (pr->flags.power == 0) + return -EINVAL; + + drv->safe_state_index = -1; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { - dev->states[i].name[0] = '\0'; - dev->states[i].desc[0] = '\0'; + drv->states[i].name[0] = '\0'; + drv->states[i].desc[0] = '\0'; } if (max_cstate == 0) @@ -1002,7 +1082,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { cx = &pr->power.states[i]; - state = &dev->states[count]; if (!cx->valid) continue; @@ -1013,8 +1092,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) continue; #endif - cpuidle_set_statedata(state, cx); + state = &drv->states[count]; snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); state->exit_latency = cx->latency; @@ -1027,13 +1106,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_c1; - dev->safe_state = state; + drv->safe_state_index = count; break; case ACPI_STATE_C2: state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = acpi_idle_enter_simple; - dev->safe_state = state; + drv->safe_state_index = count; break; case ACPI_STATE_C3: @@ -1049,7 +1128,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) break; } - dev->state_count = count; + drv->state_count = count; if (!count) return -EINVAL; @@ -1057,7 +1136,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) return 0; } -int acpi_processor_cst_has_changed(struct acpi_processor *pr) +int acpi_processor_hotplug(struct acpi_processor *pr) { int ret = 0; @@ -1078,7 +1157,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) cpuidle_disable_device(&pr->power.dev); acpi_processor_get_power_info(pr); if (pr->flags.power) { - acpi_processor_setup_cpuidle(pr); + acpi_processor_setup_cpuidle_cx(pr); ret = cpuidle_enable_device(&pr->power.dev); } cpuidle_resume_and_unlock(); @@ -1086,10 +1165,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) return ret; } +int acpi_processor_cst_has_changed(struct acpi_processor *pr) +{ + int cpu; + struct acpi_processor *_pr; + + if (disabled_by_idle_boot_param()) + return 0; + + if (!pr) + return -EINVAL; + + if (nocst) + return -ENODEV; + + if (!pr->flags.power_setup_done) + return -ENODEV; + + /* + * FIXME: Design the ACPI notification to make it once per + * system instead of once per-cpu. This condition is a hack + * to make the code that updates C-States be called once. 
+ */ + + if (smp_processor_id() == 0 && + cpuidle_get_driver() == &acpi_idle_driver) { + + cpuidle_pause_and_lock(); + /* Protect against cpu-hotplug */ + get_online_cpus(); + + /* Disable all cpuidle devices */ + for_each_online_cpu(cpu) { + _pr = per_cpu(processors, cpu); + if (!_pr || !_pr->flags.power_setup_done) + continue; + cpuidle_disable_device(&_pr->power.dev); + } + + /* Populate Updated C-state information */ + acpi_processor_setup_cpuidle_states(pr); + + /* Enable all cpuidle devices */ + for_each_online_cpu(cpu) { + _pr = per_cpu(processors, cpu); + if (!_pr || !_pr->flags.power_setup_done) + continue; + acpi_processor_get_power_info(_pr); + if (_pr->flags.power) { + acpi_processor_setup_cpuidle_cx(_pr); + cpuidle_enable_device(&_pr->power.dev); + } + } + put_online_cpus(); + cpuidle_resume_and_unlock(); + } + + return 0; +} + +static int acpi_processor_registered; + int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) { acpi_status status = 0; + int retval; static int first_run; if (disabled_by_idle_boot_param()) @@ -1126,9 +1267,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, * platforms that only support C1. */ if (pr->flags.power) { - acpi_processor_setup_cpuidle(pr); - if (cpuidle_register_device(&pr->power.dev)) - return -EIO; + /* Register acpi_idle_driver if not already registered */ + if (!acpi_processor_registered) { + acpi_processor_setup_cpuidle_states(pr); + retval = cpuidle_register_driver(&acpi_idle_driver); + if (retval) + return retval; + printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", + acpi_idle_driver.name); + } + /* Register per-cpu cpuidle_device. Cpuidle driver + * must already be registered before registering device + */ + acpi_processor_setup_cpuidle_cx(pr); + retval = cpuidle_register_device(&pr->power.dev); + if (retval) { + if (acpi_processor_registered == 0) + cpuidle_unregister_driver(&acpi_idle_driver); + return retval; + } + acpi_processor_registered++; } return 0; } @@ -1139,8 +1297,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr, if (disabled_by_idle_boot_param()) return 0; - cpuidle_unregister_device(&pr->power.dev); - pr->flags.power_setup_done = 0; + if (pr->flags.power) { + cpuidle_unregister_device(&pr->power.dev); + acpi_processor_registered--; + if (acpi_processor_registered == 0) + cpuidle_unregister_driver(&acpi_idle_driver); + } + pr->flags.power_setup_done = 0; return 0; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 449c556274c0..8ab80bafe3f1 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1062,13 +1062,12 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id) if (!id) return; - id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL); + id->id = kstrdup(dev_id, GFP_KERNEL); if (!id->id) { kfree(id); return; } - strcpy(id->id, dev_id); list_add_tail(&id->list, &device->pnp.ids); } diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index c538d0ef10ff..9f66181c814e 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -706,11 +706,23 @@ static void __exit interrupt_stats_exit(void) return; } +static ssize_t +acpi_show_profile(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); +} + +static const struct device_attribute pm_profile_attr = + __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); + int __init acpi_sysfs_init(void) { int result; result = acpi_tables_sysfs_init(); - + if (result) + return result; + 
result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr); return result; } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index becd6d99203b..06ce2680d00d 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -62,8 +62,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); int cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); + struct cpuidle_driver *drv = cpuidle_get_driver(); struct cpuidle_state *target_state; - int next_state; + int next_state, entered_state; if (off) return -ENODEV; @@ -84,45 +85,36 @@ int cpuidle_idle_call(void) hrtimer_peek_ahead_timers(); #endif - /* - * Call the device's prepare function before calling the - * governor's select function. ->prepare gives the device's - * cpuidle driver a chance to update any dynamic information - * of its cpuidle states for the current idle period, e.g. - * state availability, latencies, residencies, etc. - */ - if (dev->prepare) - dev->prepare(dev); - /* ask the governor for the next state */ - next_state = cpuidle_curr_governor->select(dev); + next_state = cpuidle_curr_governor->select(drv, dev); if (need_resched()) { local_irq_enable(); return 0; } - target_state = &dev->states[next_state]; - - /* enter the state and update stats */ - dev->last_state = target_state; + target_state = &drv->states[next_state]; trace_power_start(POWER_CSTATE, next_state, dev->cpu); trace_cpu_idle(next_state, dev->cpu); - dev->last_residency = target_state->enter(dev, target_state); + entered_state = target_state->enter(dev, drv, next_state); trace_power_end(dev->cpu); trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); - if (dev->last_state) - target_state = dev->last_state; - - target_state->time += (unsigned long long)dev->last_residency; - target_state->usage++; + if (entered_state >= 0) { + /* Update cpuidle counters */ + /* This can be moved to within driver enter routine + * but that results in multiple copies of same code. 
+ */ + dev->states_usage[entered_state].time += + (unsigned long long)dev->last_residency; + dev->states_usage[entered_state].usage++; + } /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) - cpuidle_curr_governor->reflect(dev); + cpuidle_curr_governor->reflect(dev, entered_state); return 0; } @@ -173,11 +165,11 @@ void cpuidle_resume_and_unlock(void) EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); #ifdef CONFIG_ARCH_HAS_CPU_RELAX -static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) +static int poll_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { ktime_t t1, t2; s64 diff; - int ret; t1 = ktime_get(); local_irq_enable(); @@ -189,15 +181,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) if (diff > INT_MAX) diff = INT_MAX; - ret = (int) diff; - return ret; + dev->last_residency = (int) diff; + + return index; } -static void poll_idle_init(struct cpuidle_device *dev) +static void poll_idle_init(struct cpuidle_driver *drv) { - struct cpuidle_state *state = &dev->states[0]; - - cpuidle_set_statedata(state, NULL); + struct cpuidle_state *state = &drv->states[0]; snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); @@ -208,7 +199,7 @@ static void poll_idle_init(struct cpuidle_device *dev) state->enter = poll_idle; } #else -static void poll_idle_init(struct cpuidle_device *dev) {} +static void poll_idle_init(struct cpuidle_driver *drv) {} #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ /** @@ -235,21 +226,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev) return ret; } - poll_idle_init(dev); + poll_idle_init(cpuidle_get_driver()); if ((ret = cpuidle_add_state_sysfs(dev))) return ret; if (cpuidle_curr_governor->enable && - (ret = cpuidle_curr_governor->enable(dev))) + (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev))) goto fail_sysfs; for (i = 0; i < dev->state_count; i++) { - dev->states[i].usage = 0; - dev->states[i].time = 0; + dev->states_usage[i].usage = 0; + dev->states_usage[i].time = 0; } dev->last_residency = 0; - dev->last_state = NULL; smp_wmb(); @@ -283,7 +273,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) dev->enabled = 0; if (cpuidle_curr_governor->disable) - cpuidle_curr_governor->disable(dev); + cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); cpuidle_remove_state_sysfs(dev); enabled_devices--; @@ -311,26 +301,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) init_completion(&dev->kobj_unregister); - /* - * cpuidle driver should set the dev->power_specified bit - * before registering the device if the driver provides - * power_usage numbers. - * - * For those devices whose ->power_specified is not set, - * we fill in power_usage with decreasing values as the - * cpuidle code has an implicit assumption that state Cn - * uses less power than C(n-1). - * - * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned - * an power value of -1. So we use -2, -3, etc, for other - * c-states. 
- */ - if (!dev->power_specified) { - int i; - for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) - dev->states[i].power_usage = -1 - i; - } - per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); if ((ret = cpuidle_add_sysfs(sys_dev))) { diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 3f7e3cedd133..284d7af5a9c8 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -17,6 +17,30 @@ static struct cpuidle_driver *cpuidle_curr_driver; DEFINE_SPINLOCK(cpuidle_driver_lock); +static void __cpuidle_register_driver(struct cpuidle_driver *drv) +{ + int i; + /* + * cpuidle driver should set the drv->power_specified bit + * before registering if the driver provides + * power_usage numbers. + * + * If power_specified is not set, + * we fill in power_usage with decreasing values as the + * cpuidle code has an implicit assumption that state Cn + * uses less power than C(n-1). + * + * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned + * an power value of -1. So we use -2, -3, etc, for other + * c-states. + */ + if (!drv->power_specified) { + for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) + drv->states[i].power_usage = -1 - i; + } +} + + /** * cpuidle_register_driver - registers a driver * @drv: the driver @@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) spin_unlock(&cpuidle_driver_lock); return -EBUSY; } + __cpuidle_register_driver(drv); cpuidle_curr_driver = drv; spin_unlock(&cpuidle_driver_lock); diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 3b8fce20f023..b6a09ea859b1 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev, /** * ladder_select_state - selects the next state to enter + * @drv: cpuidle driver * @dev: the CPU */ -static int ladder_select_state(struct cpuidle_device *dev) +static int ladder_select_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) { struct ladder_device *ldev = &__get_cpu_var(ladder_devices); struct ladder_device_state *last_state; @@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev) last_state = &ldev->states[last_idx]; - if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) - last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; + if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) { + last_residency = cpuidle_get_last_residency(dev) - \ + drv->states[last_idx].exit_latency; + } else last_residency = last_state->threshold.promotion_time + 1; /* consider promotion */ - if (last_idx < dev->state_count - 1 && + if (last_idx < drv->state_count - 1 && last_residency > last_state->threshold.promotion_time && - dev->states[last_idx + 1].exit_latency <= latency_req) { + drv->states[last_idx + 1].exit_latency <= latency_req) { last_state->stats.promotion_count++; last_state->stats.demotion_count = 0; if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { @@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev) /* consider demotion */ if (last_idx > CPUIDLE_DRIVER_STATE_START && - dev->states[last_idx].exit_latency > latency_req) { + drv->states[last_idx].exit_latency > latency_req) { int i; for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { - if (dev->states[i].exit_latency <= latency_req) + if (drv->states[i].exit_latency <= 
latency_req) break; } ladder_do_selection(ldev, last_idx, i); @@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev) /** * ladder_enable_device - setup for the governor + * @drv: cpuidle driver * @dev: the CPU */ -static int ladder_enable_device(struct cpuidle_device *dev) +static int ladder_enable_device(struct cpuidle_driver *drv, + struct cpuidle_device *dev) { int i; struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); @@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev) ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; - for (i = 0; i < dev->state_count; i++) { - state = &dev->states[i]; + for (i = 0; i < drv->state_count; i++) { + state = &drv->states[i]; lstate = &ldev->states[i]; lstate->stats.promotion_count = 0; @@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev) lstate->threshold.promotion_count = PROMOTION_COUNT; lstate->threshold.demotion_count = DEMOTION_COUNT; - if (i < dev->state_count - 1) + if (i < drv->state_count - 1) lstate->threshold.promotion_time = state->exit_latency; if (i > 0) lstate->threshold.demotion_time = state->exit_latency; @@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev) return 0; } +/** + * ladder_reflect - update the correct last_state_idx + * @dev: the CPU + * @index: the index of actual state entered + */ +static void ladder_reflect(struct cpuidle_device *dev, int index) +{ + struct ladder_device *ldev = &__get_cpu_var(ladder_devices); + if (index > 0) + ldev->last_state_idx = index; +} + static struct cpuidle_governor ladder_governor = { .name = "ladder", .rating = 10, .enable = ladder_enable_device, .select = ladder_select_state, + .reflect = ladder_reflect, .owner = THIS_MODULE, }; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 00275244ce2f..ad0952601ae2 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -183,7 +183,7 @@ static inline int performance_multiplier(void) static DEFINE_PER_CPU(struct menu_device, menu_devices); -static void menu_update(struct cpuidle_device *dev); +static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ static u64 div_round64(u64 dividend, u32 divisor) @@ -229,9 +229,10 @@ static void detect_repeating_patterns(struct menu_device *data) /** * menu_select - selects the next idle state to enter + * @drv: cpuidle driver containing state data * @dev: the CPU */ -static int menu_select(struct cpuidle_device *dev) +static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct menu_device *data = &__get_cpu_var(menu_devices); int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); @@ -241,7 +242,7 @@ static int menu_select(struct cpuidle_device *dev) struct timespec t; if (data->needs_update) { - menu_update(dev); + menu_update(drv, dev); data->needs_update = 0; } @@ -286,11 +287,9 @@ static int menu_select(struct cpuidle_device *dev) * Find the idle state with the lowest power while satisfying * our constraints. 
*/ - for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { - struct cpuidle_state *s = &dev->states[i]; + for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { + struct cpuidle_state *s = &drv->states[i]; - if (s->flags & CPUIDLE_FLAG_IGNORE) - continue; if (s->target_residency > data->predicted_us) continue; if (s->exit_latency > latency_req) @@ -311,26 +310,30 @@ static int menu_select(struct cpuidle_device *dev) /** * menu_reflect - records that data structures need update * @dev: the CPU + * @index: the index of actual entered state * * NOTE: it's important to be fast here because this operation will add to * the overall exit latency. */ -static void menu_reflect(struct cpuidle_device *dev) +static void menu_reflect(struct cpuidle_device *dev, int index) { struct menu_device *data = &__get_cpu_var(menu_devices); - data->needs_update = 1; + data->last_state_idx = index; + if (index >= 0) + data->needs_update = 1; } /** * menu_update - attempts to guess what happened after entry + * @drv: cpuidle driver containing state data * @dev: the CPU */ -static void menu_update(struct cpuidle_device *dev) +static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct menu_device *data = &__get_cpu_var(menu_devices); int last_idx = data->last_state_idx; unsigned int last_idle_us = cpuidle_get_last_residency(dev); - struct cpuidle_state *target = &dev->states[last_idx]; + struct cpuidle_state *target = &drv->states[last_idx]; unsigned int measured_us; u64 new_factor; @@ -384,9 +387,11 @@ static void menu_update(struct cpuidle_device *dev) /** * menu_enable_device - scans a CPU's states and does setup + * @drv: cpuidle driver * @dev: the CPU */ -static int menu_enable_device(struct cpuidle_device *dev) +static int menu_enable_device(struct cpuidle_driver *drv, + struct cpuidle_device *dev) { struct menu_device *data = &per_cpu(menu_devices, dev->cpu); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index be7917ec40c9..1e756e160dca 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = { struct cpuidle_state_attr { struct attribute attr; - ssize_t (*show)(struct cpuidle_state *, char *); + ssize_t (*show)(struct cpuidle_state *, \ + struct cpuidle_state_usage *, char *); ssize_t (*store)(struct cpuidle_state *, const char *, size_t); }; @@ -224,19 +225,22 @@ struct cpuidle_state_attr { static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) #define define_show_state_function(_name) \ -static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ +static ssize_t show_state_##_name(struct cpuidle_state *state, \ + struct cpuidle_state_usage *state_usage, char *buf) \ { \ return sprintf(buf, "%u\n", state->_name);\ } #define define_show_state_ull_function(_name) \ -static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ +static ssize_t show_state_##_name(struct cpuidle_state *state, \ + struct cpuidle_state_usage *state_usage, char *buf) \ { \ - return sprintf(buf, "%llu\n", state->_name);\ + return sprintf(buf, "%llu\n", state_usage->_name);\ } #define define_show_state_str_function(_name) \ -static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ +static ssize_t show_state_##_name(struct cpuidle_state *state, \ + struct cpuidle_state_usage *state_usage, char *buf) \ { \ if (state->_name[0] == '\0')\ return sprintf(buf, "<null>\n");\ @@ -269,16 +273,18 @@ static struct attribute 
*cpuidle_state_default_attrs[] = { #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) #define kobj_to_state(k) (kobj_to_state_obj(k)->state) +#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) static ssize_t cpuidle_state_show(struct kobject * kobj, struct attribute * attr ,char * buf) { int ret = -EIO; struct cpuidle_state *state = kobj_to_state(kobj); + struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj); struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); if (cattr->show) - ret = cattr->show(state, buf); + ret = cattr->show(state, state_usage, buf); return ret; } @@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) { int i, ret = -ENOMEM; struct cpuidle_state_kobj *kobj; + struct cpuidle_driver *drv = cpuidle_get_driver(); /* state statistics */ for (i = 0; i < device->state_count; i++) { kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); if (!kobj) goto error_state; - kobj->state = &device->states[i]; + kobj->state = &drv->states[i]; + kobj->state_usage = &device->states_usage[i]; init_completion(&kobj->kobj_unregister); ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 18767f8ab090..5d2f8e13cf0e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -82,7 +82,8 @@ static unsigned int mwait_substates; static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; -static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); +static int intel_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index); static struct cpuidle_state *cpuidle_state_table; @@ -110,7 +111,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C1 */ .name = "C1-NHM", .desc = "MWAIT 0x00", - .driver_data = (void *) 0x00, .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 3, .target_residency = 6, @@ -118,7 +118,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C2 */ .name = "C3-NHM", .desc = "MWAIT 0x10", - .driver_data = (void *) 0x10, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, .target_residency = 80, @@ -126,7 +125,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C3 */ .name = "C6-NHM", .desc = "MWAIT 0x20", - .driver_data = (void *) 0x20, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .target_residency = 800, @@ -138,7 +136,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C1 */ .name = "C1-SNB", .desc = "MWAIT 0x00", - .driver_data = (void *) 0x00, .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 1, @@ -146,7 +143,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C2 */ .name = "C3-SNB", .desc = "MWAIT 0x10", - .driver_data = (void *) 0x10, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 211, @@ -154,7 +150,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C3 */ .name = "C6-SNB", .desc = "MWAIT 0x20", - .driver_data = (void *) 0x20, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, .target_residency = 345, @@ 
-162,7 +157,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C4 */ .name = "C7-SNB", .desc = "MWAIT 0x30", - .driver_data = (void *) 0x30, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, .target_residency = 345, @@ -174,7 +168,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C1 */ .name = "C1-ATM", .desc = "MWAIT 0x00", - .driver_data = (void *) 0x00, .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 4, @@ -182,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C2 */ .name = "C2-ATM", .desc = "MWAIT 0x10", - .driver_data = (void *) 0x10, .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 20, .target_residency = 80, @@ -191,7 +183,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C4 */ .name = "C4-ATM", .desc = "MWAIT 0x30", - .driver_data = (void *) 0x30, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, @@ -200,23 +191,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C6 */ .name = "C6-ATM", .desc = "MWAIT 0x52", - .driver_data = (void *) 0x52, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, }; +static int get_driver_data(int cstate) +{ + int driver_data; + switch (cstate) { + + case 1: /* MWAIT C1 */ + driver_data = 0x00; + break; + case 2: /* MWAIT C2 */ + driver_data = 0x10; + break; + case 3: /* MWAIT C3 */ + driver_data = 0x20; + break; + case 4: /* MWAIT C4 */ + driver_data = 0x30; + break; + case 5: /* MWAIT C5 */ + driver_data = 0x40; + break; + case 6: /* MWAIT C6 */ + driver_data = 0x52; + break; + default: + driver_data = 0x00; + } + return driver_data; +} + /** * intel_idle * @dev: cpuidle_device - * @state: cpuidle state + * @drv: cpuidle driver + * @index: index of cpuidle state * */ -static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) +static int intel_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { unsigned long ecx = 1; /* break on interrupt flag */ - unsigned long eax = (unsigned long)cpuidle_get_statedata(state); + struct cpuidle_state *state = &drv->states[index]; + struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; + unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); unsigned int cstate; ktime_t kt_before, kt_after; s64 usec_delta; @@ -257,7 +280,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); - return usec_delta; + /* Update cpuidle counters */ + dev->last_residency = (int)usec_delta; + + return index; } static void __setup_broadcast_timer(void *arg) @@ -398,6 +424,60 @@ static void intel_idle_cpuidle_devices_uninit(void) return; } /* + * intel_idle_cpuidle_driver_init() + * allocate, initialize cpuidle_states + */ +static int intel_idle_cpuidle_driver_init(void) +{ + int cstate; + struct cpuidle_driver *drv = &intel_idle_driver; + + drv->state_count = 1; + + for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { + int num_substates; + + if (cstate > max_cstate) { + printk(PREFIX "max_cstate %d reached\n", + max_cstate); + break; + } + + /* does the state exist in CPUID.MWAIT? 
*/ + num_substates = (mwait_substates >> ((cstate) * 4)) + & MWAIT_SUBSTATE_MASK; + if (num_substates == 0) + continue; + /* is the state not enabled? */ + if (cpuidle_state_table[cstate].enter == NULL) { + /* does the driver not know about the state? */ + if (*cpuidle_state_table[cstate].name == '\0') + pr_debug(PREFIX "unaware of model 0x%x" + " MWAIT %d please" + " contact lenb@kernel.org", + boot_cpu_data.x86_model, cstate); + continue; + } + + if ((cstate > 2) && + !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle" + " states deeper than C2"); + + drv->states[drv->state_count] = /* structure copy */ + cpuidle_state_table[cstate]; + + drv->state_count += 1; + } + + if (auto_demotion_disable_flags) + smp_call_function(auto_demotion_disable, NULL, 1); + + return 0; +} + + +/* * intel_idle_cpuidle_devices_init() * allocate, initialize, register cpuidle_devices */ @@ -431,22 +511,11 @@ static int intel_idle_cpuidle_devices_init(void) continue; /* is the state not enabled? */ if (cpuidle_state_table[cstate].enter == NULL) { - /* does the driver not know about the state? */ - if (*cpuidle_state_table[cstate].name == '\0') - pr_debug(PREFIX "unaware of model 0x%x" - " MWAIT %d please" - " contact lenb@kernel.org", - boot_cpu_data.x86_model, cstate); continue; } - if ((cstate > 2) && - !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - mark_tsc_unstable("TSC halts in idle" - " states deeper than C2"); - - dev->states[dev->state_count] = /* structure copy */ - cpuidle_state_table[cstate]; + dev->states_usage[dev->state_count].driver_data = + (void *)get_driver_data(cstate); dev->state_count += 1; } @@ -459,8 +528,6 @@ static int intel_idle_cpuidle_devices_init(void) return -EIO; } } - if (auto_demotion_disable_flags) - smp_call_function(auto_demotion_disable, NULL, 1); return 0; } @@ -478,6 +545,7 @@ static int __init intel_idle_init(void) if (retval) return retval; + intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index bbf3edd85beb..5be4a392a3ae 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -509,15 +509,12 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, struct acpi_resource_dma *p) { int i; - unsigned char map = 0, flags = 0; - - if (p->channel_count == 0) - flags |= IORESOURCE_DISABLED; + unsigned char map = 0, flags; for (i = 0; i < p->channel_count; i++) map |= 1 << p->channels[i]; - flags |= dma_flags(dev, p->type, p->bus_master, p->transfer); + flags = dma_flags(dev, p->type, p->bus_master, p->transfer); pnp_register_dma_resource(dev, option_flags, map, flags); } @@ -527,17 +524,14 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, { int i; pnp_irq_mask_t map; - unsigned char flags = 0; - - if (p->interrupt_count == 0) - flags |= IORESOURCE_DISABLED; + unsigned char flags; bitmap_zero(map.bits, PNP_IRQ_NR); for (i = 0; i < p->interrupt_count; i++) if (p->interrupts[i]) __set_bit(p->interrupts[i], map.bits); - flags |= irq_flags(p->triggering, p->polarity, p->sharable); + flags = irq_flags(p->triggering, p->polarity, p->sharable); pnp_register_irq_resource(dev, option_flags, &map, flags); } @@ -547,10 +541,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, { int i; pnp_irq_mask_t map; - unsigned char flags = 0; - - if (p->interrupt_count == 0) - flags |= IORESOURCE_DISABLED; + unsigned char 
flags; bitmap_zero(map.bits, PNP_IRQ_NR); for (i = 0; i < p->interrupt_count; i++) { @@ -564,7 +555,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, } } - flags |= irq_flags(p->triggering, p->polarity, p->sharable); + flags = irq_flags(p->triggering, p->polarity, p->sharable); pnp_register_irq_resource(dev, option_flags, &map, flags); } @@ -574,11 +565,8 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, { unsigned char flags = 0; - if (io->address_length == 0) - flags |= IORESOURCE_DISABLED; - if (io->io_decode == ACPI_DECODE_16) - flags |= IORESOURCE_IO_16BIT_ADDR; + flags = IORESOURCE_IO_16BIT_ADDR; pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, io->alignment, io->address_length, flags); } @@ -587,13 +575,8 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, unsigned int option_flags, struct acpi_resource_fixed_io *io) { - unsigned char flags = 0; - - if (io->address_length == 0) - flags |= IORESOURCE_DISABLED; - pnp_register_port_resource(dev, option_flags, io->address, io->address, - 0, io->address_length, flags | IORESOURCE_IO_FIXED); + 0, io->address_length, IORESOURCE_IO_FIXED); } static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, @@ -602,11 +585,8 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, { unsigned char flags = 0; - if (p->address_length == 0) - flags |= IORESOURCE_DISABLED; - if (p->write_protect == ACPI_READ_WRITE_MEMORY) - flags |= IORESOURCE_MEM_WRITEABLE; + flags = IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, p->alignment, p->address_length, flags); } @@ -617,11 +597,8 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, { unsigned char flags = 0; - if (p->address_length == 0) - flags |= IORESOURCE_DISABLED; - if (p->write_protect == ACPI_READ_WRITE_MEMORY) - flags |= IORESOURCE_MEM_WRITEABLE; + flags = IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, p->alignment, p->address_length, flags); } @@ -632,11 +609,8 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, { unsigned char flags = 0; - if (p->address_length == 0) - flags |= IORESOURCE_DISABLED; - if (p->write_protect == ACPI_READ_WRITE_MEMORY) - flags |= IORESOURCE_MEM_WRITEABLE; + flags = IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->address, p->address, 0, p->address_length, flags); } @@ -656,19 +630,16 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, return; } - if (p->address_length == 0) - flags |= IORESOURCE_DISABLED; - if (p->resource_type == ACPI_MEMORY_RANGE) { if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) - flags |= IORESOURCE_MEM_WRITEABLE; + flags = IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, flags); } else if (p->resource_type == ACPI_IO_RANGE) pnp_register_port_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, - flags | IORESOURCE_IO_FIXED); + IORESOURCE_IO_FIXED); } static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, @@ -678,19 +649,16 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, struct acpi_resource_extended_address64 *p = &r->data.ext_address64; unsigned char flags = 0; - if (p->address_length == 0) - flags |= IORESOURCE_DISABLED; - if (p->resource_type == ACPI_MEMORY_RANGE) { if (p->info.mem.write_protect == 
ACPI_READ_WRITE_MEMORY) - flags |= IORESOURCE_MEM_WRITEABLE; + flags = IORESOURCE_MEM_WRITEABLE; pnp_register_mem_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, flags); } else if (p->resource_type == ACPI_IO_RANGE) pnp_register_port_resource(dev, option_flags, p->minimum, p->minimum, 0, p->address_length, - flags | IORESOURCE_IO_FIXED); + IORESOURCE_IO_FIXED); } struct acpipnp_parse_option_s { diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 708f8e92771a..dd9a5743fa99 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -678,10 +678,10 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, return; if (delay > 1000) - schedule_delayed_work(&(tz->poll_queue), + queue_delayed_work(system_freezable_wq, &(tz->poll_queue), round_jiffies(msecs_to_jiffies(delay))); else - schedule_delayed_work(&(tz->poll_queue), + queue_delayed_work(system_freezable_wq, &(tz->poll_queue), msecs_to_jiffies(delay)); } |
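
Beyond the idle rework, the one userspace-visible addition in this set is the FADT preferred PM profile export in drivers/acpi/sysfs.c. Assuming the attribute surfaces as /sys/firmware/acpi/pm_profile (acpi_kobj is the "acpi" kobject under /sys/firmware), a trivial userspace reader looks like this:

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/acpi/pm_profile", "r");
	int profile = -1;

	if (!f) {
		perror("pm_profile");
		return 1;
	}
	if (fscanf(f, "%d", &profile) != 1)
		profile = -1;
	fclose(f);

	/* Value follows the ACPI FADT Preferred_PM_Profile table,
	 * e.g. 1 = desktop, 2 = mobile. */
	printf("FADT preferred PM profile: %d\n", profile);
	return 0;
}
```

This lets power-policy tools key off the firmware's stated platform type instead of guessing from DMI strings.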