/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor based on historic behavior
 * is applied to the estimate. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time, the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 * The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * if the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to too high a performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per cpu load average" we have,
 * and a value of 10 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
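
/*
 * Worked example of the correction factor (illustrative numbers, not from
 * any real trace): factors are stored as fixed-point values scaled by
 * RESOLUTION * DECAY (8192 == 1.0). If the next timer is 1000us away and
 * the bucket's factor has settled at 5120 (i.e. 0.625), the predicted
 * idle duration becomes 1000 * 5120 / 8192 = 625us.
 */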

struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}
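
/*
 * Illustrative example (hypothetical load value): with a per-cpu load
 * average of 1.50, LOAD_INT() yields 1 and LOAD_FRAC() yields 50, so
 * get_loadavg() returns 1 * 10 + 50 / 10 = 15 -- roughly ten points per
 * unit of load average.
 */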

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
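
/*
 * Example (hypothetical inputs): an expected duration of 250us with IO
 * outstanding lands in bucket BUCKETS/2 + 2 = 8; the same duration with
 * no IO pending lands in bucket 2.
 */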

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	mult += 2 * get_loadavg();

	/* for IO wait tasks (per cpu!) we add 10 points each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}
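
/*
 * Example (hypothetical inputs): with a per-cpu load average of 1.50
 * (get_loadavg() == 15) and two tasks in iowait on this CPU, the
 * multiplier is 1 + 2 * 15 + 10 * 2 = 51, so a C state with a 100us exit
 * latency only becomes a candidate once the predicted idle time is at
 * least 5100us.
 */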

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
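
/* e.g. div_round64(10, 4) == (10 + 2) / 4 == 3, i.e. 2.5 rounded to 3. */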

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void detect_repeating_patterns(struct menu_device *data)
{
	int i;
	uint64_t avg = 0;
	uint64_t stddev = 0; /* contains the square of the std deviation */

	/* first calculate average and standard deviation of the past */
	for (i = 0; i < INTERVALS; i++)
		avg += data->intervals[i];
	avg = avg / INTERVALS;

	/* if the avg is beyond the known next tick, it's worthless */
	if (avg > data->expected_us)
		return;

	for (i = 0; i < INTERVALS; i++)
		stddev += (data->intervals[i] - avg) *
			  (data->intervals[i] - avg);

	stddev = stddev / INTERVALS;

	/*
	 * now.. if stddev is small.. then assume we have a
	 * repeating pattern and predict we keep doing this.
	 */

	if (avg && stddev < STDDEV_THRESH)
		data->predicted_us = avg;
}
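
/*
 * Worked example (hypothetical samples): for intervals
 * {100, 102, 98, 101, 99, 100, 103, 97} the average is 100us and the
 * squared deviations sum to 28, so "stddev" is 28 / 8 = 3. That is well
 * under STDDEV_THRESH (400, i.e. a standard deviation of 20us), so 100us
 * becomes the prediction.
 */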

/**
 * menu_select - selects the next idle state to enter
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;

	if (data->needs_update) {
		menu_update(dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	data->expected_us =
	    DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/* find the deepest idle state that satisfies our constraints */
	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
		struct cpuidle_state *s = &dev->states[i];

		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req)
			break;
		if (s->exit_latency * multiplier > data->predicted_us)
			break;
		data->exit_us = s->exit_latency;
		data->last_state_idx = i;
	}

	return data->last_state_idx;
}
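
/*
 * Selection example (hypothetical state table): with predicted_us == 300,
 * latency_req == 1000 and multiplier == 10, a state with 200us target
 * residency and 20us exit latency is accepted (20 * 10 <= 300), while a
 * deeper state with 50us exit latency is rejected (50 * 10 > 300).
 */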

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);

	data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &dev->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark. As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
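
/*
 * Correction-ratio example (hypothetical numbers): with a bucket factor
 * of 8192 (unity), expected_us == 1000 and measured_us == 500, the new
 * factor is 8192 * 7 / 8 + 1024 * 500 / 1000 = 7168 + 512 = 7680, so the
 * bucket now predicts about 94% of the next-timer distance.
 */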

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);