/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where the kernel does not
 * reschedule on a CPU for softlockup_thresh seconds (60 by default).
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

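/*
 * Global and per-CPU state, one watchdog thread per CPU:
 *
 *  - touch_timestamp: last time (in get_timestamp() units, ~seconds)
 *    this CPU's watchdog thread got to run; 0 is a sentinel meaning
 *    "re-initialize on the next timer tick".
 *  - print_timestamp: the touch_timestamp we last reported, so each
 *    stall is reported only once.
 *  - watchdog_task: the per-CPU "watchdog/N" kthread.
 *
 * print_lock serializes the multi-line lockup report between CPUs.
 */
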
static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * soft-lockup occurs:
 */
unsigned int __read_mostly softlockup_panic =
				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

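/*
 * Usage example: booting with "softlockup_panic=1" on the kernel
 * command line turns a detected soft lockup into a panic (and thus a
 * reboot, if panic_timeout= is also set); the compiled-in default
 * comes from CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE above.
 */
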
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};

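/*
 * panic_block gets registered on panic_notifier_list in
 * spawn_softlockup_task() below; after a panic, did_panic suppresses
 * any further soft-lockup and hung-task reports.
 */
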
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

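/*
 * Worked example: cpu_clock() returns nanoseconds, and ">> 30"
 * divides by 2^30 = 1073741824.  So 5000000000ns (5.0s of wall time)
 * yields 4, i.e. each "second" here is really ~1.074s - accurate
 * enough for thresholds measured in tens of seconds.
 */
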
static void __touch_softlockup_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(touch_timestamp) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

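/*
 * Note the asymmetry: the internal __touch_softlockup_watchdog()
 * stores the current timestamp, while the exported variant just
 * stores the 0 sentinel and lets the next timer tick re-read the
 * clock, which avoids reading the clock in the caller's context.
 *
 * Sketch of a caller, with a hypothetical helper name: code that
 * legitimately keeps a CPU busy for a long stretch can do
 *
 *	while (!poll_hardware_done())
 *		touch_softlockup_watchdog();
 *
 * to tell the detector that the stall is intentional.
 */
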
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/* Cause each CPU to re-update its timestamp rather than complain */
	for_each_online_cpu(cpu)
		per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);

/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	unsigned long print_timestamp;
	struct pt_regs *regs = get_irq_regs();
	unsigned long now;

	/* Is detection switched off? */
	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
		/* Be sure we don't false trigger if switched back on */
		if (touch_timestamp)
			per_cpu(touch_timestamp, this_cpu) = 0;
		return;
	}

	if (touch_timestamp == 0) {
		__touch_softlockup_watchdog();
		return;
	}

	print_timestamp = per_cpu(print_timestamp, this_cpu);

	/* Report each stall at most once: */
	if ((print_timestamp >= touch_timestamp &&
			print_timestamp < (touch_timestamp + 1)) ||
			did_panic) {
		return;
	}

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		__touch_softlockup_watchdog();
		return;
	}

	now = get_timestamp(this_cpu);

	/* Wake up the high-prio watchdog task every second: */
	if (now > (touch_timestamp + 1))
		wake_up_process(per_cpu(watchdog_task, this_cpu));

	/* Warn about unreasonable delays: */
	if (now <= (touch_timestamp + softlockup_thresh))
		return;

	per_cpu(print_timestamp, this_cpu) = touch_timestamp;

	spin_lock(&print_lock);
	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
			this_cpu, now - touch_timestamp,
			current->comm, task_pid_nr(current));
	print_modules();
	if (regs)
		show_regs(regs);
	else
		dump_stack();
	spin_unlock(&print_lock);

	if (softlockup_panic)
		panic("softlockup: hung tasks");
}

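/*
 * Putting the two halves together, the protocol per CPU is roughly:
 *
 *	t       watchdog thread runs, touch_timestamp = t
 *	t+1...  each timer tick wakes the thread; while the thread
 *	        keeps getting CPU time, touch_timestamp keeps advancing
 *	t+N     N > softlockup_thresh and the thread still has not run,
 *	        so this CPU never rescheduled: print the lockup report
 */
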
/*
 * Have a reasonable limit on the number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;

unsigned long __read_mostly sysctl_hung_task_warnings = 10;

/*
 * Only do the hung-tasks check on one CPU:
 */
static int check_cpu __read_mostly = -1;

static void check_hung_task(struct task_struct *t, unsigned long now)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (t->flags & PF_FROZEN)
		return;

	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
		t->last_switch_count = switch_count;
		t->last_switch_timestamp = now;
		return;
	}
	if ((long)(now - t->last_switch_timestamp) <
					sysctl_hung_task_timeout_secs)
		return;
	/*
	 * sysctl_hung_task_warnings is unsigned, so a "< 0" test would
	 * always be false; stop once the budget reaches zero instead:
	 */
	if (!sysctl_hung_task_warnings)
		return;
	sysctl_hung_task_warnings--;

	/*
	 * Ok, the task did not get scheduled for sysctl_hung_task_timeout_secs
	 * (two minutes by default), complain:
	 */
	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
			"%lu seconds.\n", t->comm, t->pid,
			sysctl_hung_task_timeout_secs);
	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
	sched_show_task(t);
	__debug_show_held_locks(t);

	t->last_switch_timestamp = now;
	touch_nmi_watchdog();

	if (softlockup_panic)
		panic("softlockup: blocked tasks");
}

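/*
 * The bookkeeping above lives in two task_struct fields:
 * ->last_switch_count snapshots nvcsw + nivcsw (voluntary plus
 * involuntary context switches) and ->last_switch_timestamp records
 * when that snapshot was taken.  A task is flagged only if its switch
 * count has not moved for a full timeout period.
 */
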
/*
 * Check whether a TASK_UNINTERRUPTIBLE task does not get woken up for
 * a really long time (120 seconds by default). If that happens, print
 * out a warning.
 */
static void check_hung_uninterruptible_tasks(int this_cpu)
{
	int max_count = sysctl_hung_task_check_count;
	unsigned long now = get_timestamp(this_cpu);
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if ((tainted & TAINT_DIE) || did_panic)
		return;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (!--max_count)
			goto unlock;
		if (t->state & TASK_UNINTERRUPTIBLE)
			check_hung_task(t, now);
	} while_each_thread(g, t);
 unlock:
	read_unlock(&tasklist_lock);
}

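/*
 * Scanning every thread under tasklist_lock is comparatively costly,
 * which is presumably why the scan is limited to a single CPU
 * (check_cpu) and capped at sysctl_hung_task_check_count threads.
 */
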
/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int this_cpu = (long)__bind_cpu;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_softlockup_watchdog();

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than softlockup_thresh seconds
	 * (60 by default) then the debug-printout triggers in
	 * softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		__touch_softlockup_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		if (this_cpu == check_cpu) {
			if (sysctl_hung_task_timeout_secs)
				check_hung_uninterruptible_tasks(this_cpu);
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

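/*
 * Design note: the watchdog runs SCHED_FIFO at MAX_RT_PRIO-1, the
 * highest realtime priority.  If even this thread cannot get the CPU
 * for softlockup_thresh seconds, then nothing can, so the stall is a
 * genuine soft lockup rather than ordinary scheduling pressure.
 */
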
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = 0;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		check_cpu = any_online_cpu(cpu_online_map);
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (hotcpu == check_cpu) {
			/* Pick another CPU to run the hung-task check: */
			cpumask_t temp_cpu_online_map = cpu_online_map;

			cpu_clear(hotcpu, temp_cpu_online_map);
			check_cpu = any_online_cpu(temp_cpu_online_map);
		}
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

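/*
 * Hotplug lifecycle: CPU_UP_PREPARE creates and binds the thread,
 * CPU_ONLINE wakes it (and re-elects check_cpu), CPU_DOWN_PREPARE
 * moves the hung-task check off the dying CPU, and CPU_UP_CANCELED /
 * CPU_DEAD unbind and stop the thread again.
 */
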
__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}
8446f1d3 | 349 | } |