/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <asm/mwait.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS		"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME	"Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY	0x80

static DEFINE_MUTEX(isolated_cpus_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
static unsigned char lapic_detected_unstable;
static unsigned char lapic_marked_unstable;

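/*
 * Probe CPUID for the deepest MWAIT C-state hint and record it in
 * power_saving_mwait_eax; also note whether the TSC or the LAPIC timer
 * may stop in deep C-states.  power_saving_mwait_eax stays 0 (and module
 * init then fails) when MWAIT with break-on-interrupt is unavailable.
 */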
static void power_saving_mwait_init(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int highest_cstate = 0;
        unsigned int highest_subcstate = 0;
        int i;

        if (!boot_cpu_has(X86_FEATURE_MWAIT))
                return;
        if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
                return;

        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
            !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
                return;

        edx >>= MWAIT_SUBSTATE_SIZE;
        for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
                if (edx & MWAIT_SUBSTATE_MASK) {
                        highest_cstate = i;
                        highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
                }
        }
        power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
                (highest_subcstate - 1);

#if defined(CONFIG_X86)
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
        case X86_VENDOR_INTEL:
                /*
                 * AMD Fam10h TSC will tick in all
                 * C/P/S0/S1 states when this bit is set.
                 */
                if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                        tsc_detected_unstable = 1;
                if (!boot_cpu_has(X86_FEATURE_ARAT))
                        lapic_detected_unstable = 1;
                break;
        default:
                /* TSC & LAPIC could halt in idle */
                tsc_detected_unstable = 1;
                lapic_detected_unstable = 1;
        }
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);

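/*
 * Move the calling power-saving thread onto the least-weighted eligible
 * CPU, preferring CPUs whose hyperthread siblings are not already running
 * one of our threads.
 */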
static void round_robin_cpu(unsigned int tsk_index)
{
        struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
        cpumask_var_t tmp;
        int cpu;
        unsigned long min_weight = -1;
        unsigned long uninitialized_var(preferred_cpu);

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return;

        mutex_lock(&isolated_cpus_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
        cpumask_andnot(tmp, cpu_online_mask, tmp);
        /* avoid HT siblings if possible */
        if (cpumask_empty(tmp))
                cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
        if (cpumask_empty(tmp)) {
                mutex_unlock(&isolated_cpus_lock);
                free_cpumask_var(tmp);
                return;
        }
        for_each_cpu(cpu, tmp) {
                if (cpu_weight[cpu] < min_weight) {
                        min_weight = cpu_weight[cpu];
                        preferred_cpu = cpu;
                }
        }

        if (tsk_in_cpu[tsk_index] != -1)
                cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
        tsk_in_cpu[tsk_index] = preferred_cpu;
        cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
        cpu_weight[preferred_cpu]++;
        mutex_unlock(&isolated_cpus_lock);
        /* free the temporary mask allocated above */
        free_cpumask_var(tmp);

        set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}

static void exit_round_robin(unsigned int tsk_index)
{
        struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
        cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
        tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 10; /* second */
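
/*
 * Each power-saving thread holds its CPU in mwait idle for
 * (100 - idle_pct)% of every second, then naps for idle_pct% so the
 * sched_rt throttle keeps working; every round_robin_time seconds it
 * migrates to the least-loaded eligible CPU.
 */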
static int power_saving_thread(void *data)
{
        struct sched_param param = {.sched_priority = 1};
        int do_sleep;
        unsigned int tsk_index = (unsigned long)data;
        u64 last_jiffies = 0;

        sched_setscheduler(current, SCHED_RR, &param);

        while (!kthread_should_stop()) {
                int cpu;
                u64 expire_time;

                try_to_freeze();

                /* round robin to cpus */
                if (last_jiffies + round_robin_time * HZ < jiffies) {
                        last_jiffies = jiffies;
                        round_robin_cpu(tsk_index);
                }

                do_sleep = 0;

                expire_time = jiffies + HZ * (100 - idle_pct) / 100;

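                /*
                 * Keep the CPU in its deepest MWAIT C-state until the
                 * (100 - idle_pct)% window expires or other work becomes
                 * runnable.
                 */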
                while (!need_resched()) {
                        if (tsc_detected_unstable && !tsc_marked_unstable) {
                                /* TSC could halt in idle, so notify users */
                                mark_tsc_unstable("TSC halts in idle");
                                tsc_marked_unstable = 1;
                        }
                        if (lapic_detected_unstable && !lapic_marked_unstable) {
                                int i;
                                /* LAPIC could halt in idle, so notify users */
                                for_each_online_cpu(i)
                                        clockevents_notify(
                                                CLOCK_EVT_NOTIFY_BROADCAST_ON,
                                                &i);
                                lapic_marked_unstable = 1;
                        }
                        local_irq_disable();
                        cpu = smp_processor_id();
                        if (lapic_marked_unstable)
                                clockevents_notify(
                                        CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
                        stop_critical_timings();

                        __monitor((void *)&current_thread_info()->flags, 0, 0);
                        smp_mb();
                        if (!need_resched())
                                __mwait(power_saving_mwait_eax, 1);

                        start_critical_timings();
                        if (lapic_marked_unstable)
                                clockevents_notify(
                                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                        local_irq_enable();

                        if (jiffies > expire_time) {
                                do_sleep = 1;
                                break;
                        }
                }

                /*
                 * current sched_rt enforces a threshold on RT task runtime:
                 * when an RT task has used 95% of the CPU time, it is
                 * scheduled out for 5% so that other tasks are not starved.
                 * But the mechanism only works when every CPU is running an
                 * RT task; if one CPU has no RT task, RT tasks from other
                 * CPUs borrow CPU time from it and can exceed 95% CPU time.
                 * To keep the starvation avoidance working, take a nap here.
                 */
                if (do_sleep)
                        schedule_timeout_killable(HZ * idle_pct / 100);
        }

        exit_round_robin(tsk_index);
        return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
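
/*
 * Pool of power-saving kthreads, one per CPU being idled.  The pool is
 * resized only with isolated_cpus_lock held by the callers of
 * set_power_saving_task_num().
 */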
static int create_power_saving_task(void)
{
        int rc = -ENOMEM;

        ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
                (void *)(unsigned long)ps_tsk_num,
                "power_saving/%d", ps_tsk_num);
        rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
        if (!rc)
                ps_tsk_num++;
        else
                ps_tsks[ps_tsk_num] = NULL;

        return rc;
}

static void destroy_power_saving_task(void)
{
        if (ps_tsk_num > 0) {
                ps_tsk_num--;
                kthread_stop(ps_tsks[ps_tsk_num]);
                ps_tsks[ps_tsk_num] = NULL;
        }
}

static void set_power_saving_task_num(unsigned int num)
{
        if (num > ps_tsk_num) {
                while (ps_tsk_num < num) {
                        if (create_power_saving_task())
                                return;
                }
        } else if (num < ps_tsk_num) {
                while (ps_tsk_num > num)
                        destroy_power_saving_task();
        }
}

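/*
 * Request that num_cpus CPUs be kept idle.  The request is clamped to
 * the number of online CPUs, and CPU hotplug is blocked while the
 * thread pool is resized.
 */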
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
        get_online_cpus();

        num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
        set_power_saving_task_num(num_cpus);

        put_online_cpus();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
        return ps_tsk_num;
}

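/*
 * Sysfs knobs: idlecpus, idlepct and rrtime.  For example (the device
 * path below is illustrative only and varies by platform):
 *
 *	echo 2 > /sys/devices/LNXSYSTM:00/device:00/ACPI000C:00/idlecpus
 */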
static ssize_t acpi_pad_rrtime_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        unsigned long num;
        if (strict_strtoul(buf, 0, &num))
                return -EINVAL;
        if (num < 1 || num >= 100)
                return -EINVAL;
        mutex_lock(&isolated_cpus_lock);
        round_robin_time = num;
        mutex_unlock(&isolated_cpus_lock);
        return count;
}

static ssize_t acpi_pad_rrtime_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
        acpi_pad_rrtime_show,
        acpi_pad_rrtime_store);

static ssize_t acpi_pad_idlepct_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        unsigned long num;
        if (strict_strtoul(buf, 0, &num))
                return -EINVAL;
        if (num < 1 || num >= 100)
                return -EINVAL;
        mutex_lock(&isolated_cpus_lock);
        idle_pct = num;
        mutex_unlock(&isolated_cpus_lock);
        return count;
}

static ssize_t acpi_pad_idlepct_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
        acpi_pad_idlepct_show,
        acpi_pad_idlepct_store);

static ssize_t acpi_pad_idlecpus_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        unsigned long num;
        if (strict_strtoul(buf, 0, &num))
                return -EINVAL;
        mutex_lock(&isolated_cpus_lock);
        acpi_pad_idle_cpus(num);
        mutex_unlock(&isolated_cpus_lock);
        return count;
}

static ssize_t acpi_pad_idlecpus_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return cpumask_scnprintf(buf, PAGE_SIZE,
                to_cpumask(pad_busy_cpus_bits));
}
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
        acpi_pad_idlecpus_show,
        acpi_pad_idlecpus_store);

static int acpi_pad_add_sysfs(struct acpi_device *device)
{
        int result;

        result = device_create_file(&device->dev, &dev_attr_idlecpus);
        if (result)
                return -ENODEV;
        result = device_create_file(&device->dev, &dev_attr_idlepct);
        if (result) {
                device_remove_file(&device->dev, &dev_attr_idlecpus);
                return -ENODEV;
        }
        result = device_create_file(&device->dev, &dev_attr_rrtime);
        if (result) {
                device_remove_file(&device->dev, &dev_attr_idlecpus);
                device_remove_file(&device->dev, &dev_attr_idlepct);
                return -ENODEV;
        }
        return 0;
}

static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
        device_remove_file(&device->dev, &dev_attr_idlecpus);
        device_remove_file(&device->dev, &dev_attr_idlepct);
        device_remove_file(&device->dev, &dev_attr_rrtime);
}

/*
 * Query firmware for how many CPUs should be idled.
 * A conforming _PUR (ACPI 4.0) returns Package() {1, num}: revision 1
 * plus the requested number of idle CPUs.  Returns -1 on failure.
 */
static int acpi_pad_pur(acpi_handle handle)
{
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package;
        int num = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
                return num;

        if (!buffer.length || !buffer.pointer)
                return num;

        package = buffer.pointer;

        if (package->type == ACPI_TYPE_PACKAGE &&
                package->package.count == 2 &&
                package->package.elements[0].integer.value == 1) /* rev 1 */
                num = package->package.elements[1].integer.value;

        kfree(buffer.pointer);
        return num;
}

/* Notify firmware how many CPUs are idle */
static void acpi_pad_ost(acpi_handle handle, int stat,
        uint32_t idle_cpus)
{
        union acpi_object params[3] = {
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_BUFFER,},
        };
        struct acpi_object_list arg_list = {3, params};

        params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
        params[1].integer.value = stat;
        params[2].buffer.length = 4;
        params[2].buffer.pointer = (void *)&idle_cpus;
        acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}

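/*
 * Handle the 0x80 notification from firmware: re-evaluate _PUR, resize
 * the pool of power-saving threads, and report the resulting idle-CPU
 * count back through _OST.
 */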
static void acpi_pad_handle_notify(acpi_handle handle)
{
        int num_cpus;
        uint32_t idle_cpus;

        mutex_lock(&isolated_cpus_lock);
        num_cpus = acpi_pad_pur(handle);
        if (num_cpus < 0) {
                mutex_unlock(&isolated_cpus_lock);
                return;
        }
        acpi_pad_idle_cpus(num_cpus);
        idle_cpus = acpi_pad_idle_cpus_num();
        acpi_pad_ost(handle, 0, idle_cpus);
        mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
        void *data)
{
        struct acpi_device *device = data;

        switch (event) {
        case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
                acpi_pad_handle_notify(handle);
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                        dev_name(&device->dev), event, 0);
                break;
        default:
                printk(KERN_WARNING "Unsupported event [0x%x]\n", event);
                break;
        }
}

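/*
 * Bind to a Processor Aggregator device: name it, create the sysfs
 * knobs, then register for its device notifications.
 */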
static int acpi_pad_add(struct acpi_device *device)
{
        acpi_status status;

        strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

        if (acpi_pad_add_sysfs(device))
                return -ENODEV;

        status = acpi_install_notify_handler(device->handle,
                ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
        if (ACPI_FAILURE(status)) {
                acpi_pad_remove_sysfs(device);
                return -ENODEV;
        }

        return 0;
}

static int acpi_pad_remove(struct acpi_device *device,
        int type)
{
        mutex_lock(&isolated_cpus_lock);
        acpi_pad_idle_cpus(0);
        mutex_unlock(&isolated_cpus_lock);

        acpi_remove_notify_handler(device->handle,
                ACPI_DEVICE_NOTIFY, acpi_pad_notify);
        acpi_pad_remove_sysfs(device);
        return 0;
}

static const struct acpi_device_id pad_device_ids[] = {
        {"ACPI000C", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
        .name = "processor_aggregator",
        .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
        .ids = pad_device_ids,
        .ops = {
                .add = acpi_pad_add,
                .remove = acpi_pad_remove,
        },
};

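/*
 * Refuse to load when no usable MWAIT hint was found, since the
 * power-saving threads would have no way to enter a deep C-state.
 */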
static int __init acpi_pad_init(void)
{
        power_saving_mwait_init();
        if (power_saving_mwait_eax == 0)
                return -EINVAL;

        return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
        acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");