/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * this code detects hard lockups: incidents where, on a CPU,
 * the kernel does not respond to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */
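/*
 * Two detectors cooperate below: a per-CPU hrtimer plus a
 * high-priority kthread catch soft lockups (a task hogging the CPU
 * so nothing else can schedule), and a perf NMI cycle counter
 * catches hard lockups (the CPU so wedged that not even the hrtimer
 * interrupt is serviced).
 */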
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>
int watchdog_enabled;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_PERF_EVENTS_NMI
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static int __read_mostly did_panic;
static int __initdata no_watchdog;
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_PERF_EVENTS_NMI
static int hardlockup_panic;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
        no_watchdog = 1;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        no_watchdog = 1;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
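/*
 * Summary of the boot parameters wired up above:
 *   nmi_watchdog=panic    panic on a detected hard lockup
 *   softlockup_panic=1    panic on a detected soft lockup
 *   nowatchdog            disable the lockup detectors entirely
 *   nosoftlockup          also disables the detectors here, via the
 *                         same no_watchdog flag
 */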
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
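/*
 * Example: cpu_clock() returns nanoseconds, and a shift by 30
 * divides by 2^30 (~1.074e9), so 60 seconds worth of nanoseconds
 * becomes ~55 units; close enough for a multi-second threshold.
 */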
static unsigned long get_sample_period(void)
{
        /*
         * convert softlockup_thresh from seconds to ns
         * the divide by 5 is to give hrtimer 5 chances to
         * increment before the hardlockup detector generates
         * a warning
         */
        return softlockup_thresh / 5 * NSEC_PER_SEC;
}
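/*
 * Example: with the default softlockup_thresh of 60 the hrtimer
 * fires every 60 / 5 = 12 seconds.  Note the integer division: a
 * threshold below 5 seconds would yield a sample period of 0.
 */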
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        int this_cpu = raw_smp_processor_id();

        __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
        __get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlock check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}
void touch_nmi_watchdog(void)
{
        __get_cpu_var(watchdog_nmi_touch) = true;
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}
#ifdef CONFIG_PERF_EVENTS_NMI
/* watchdog detector functions */
static int is_hardlockup(int cpu)
{
        unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

        if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
                return 1;

        per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
        return 0;
}
#endif
static int is_softlockup(unsigned long touch_ts, int cpu)
{
        unsigned long now = get_timestamp(cpu);

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + softlockup_thresh))
                return now - touch_ts;

        return 0;
}
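/*
 * Example: with softlockup_thresh = 60, a timestamp last touched at
 * t = 100 is fine until t = 160; at t = 170 this returns 70, the
 * approximate number of seconds the CPU has been stuck.
 */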
static int
watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
        did_panic = 1;

        return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
        .notifier_call = watchdog_panic,
};
#ifdef CONFIG_PERF_EVENTS_NMI
static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
/* Callback function for perf event subsystem */
void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();

        if (__get_cpu_var(watchdog_nmi_touch) == true) {
                __get_cpu_var(watchdog_nmi_touch) = false;
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing.  The timer interrupt should have
         * fired multiple times before we overflow'd.  If it hasn't
         * then this is a good indication the cpu is stuck
         */
        if (is_hardlockup(this_cpu)) {
                /* only print hardlockups once */
                if (__get_cpu_var(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __get_cpu_var(hard_watchdog_warn) = true;
                return;
        }

        __get_cpu_var(hard_watchdog_warn) = false;
        return;
}
static void watchdog_interrupt_count(void)
{
        __get_cpu_var(hrtimer_interrupts)++;
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_PERF_EVENTS_NMI */
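/*
 * The counter above closes the loop on hard lockup detection: the
 * hrtimer should bump hrtimer_interrupts about 5 times per NMI
 * sample period, so if two consecutive NMIs see the same count,
 * timer interrupts are no longer being serviced and is_hardlockup()
 * reports the CPU as stuck.
 */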
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        int this_cpu = smp_processor_id();
        unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__get_cpu_var(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        per_cpu(softlockup_touch_sync, this_cpu) = false;
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }
        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts, this_cpu);
        if (unlikely(duration)) {
                /* only warn once */
                if (__get_cpu_var(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        this_cpu, duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __get_cpu_var(soft_watchdog_warn) = true;
        } else
                __get_cpu_var(soft_watchdog_warn) = false;

        return HRTIMER_RESTART;
}
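/*
 * One full kick cycle: the timer function above wakes the per-CPU
 * watchdog kthread, the kthread refreshes watchdog_touch_ts, and the
 * next timer run compares that timestamp against softlockup_thresh.
 * A stale timestamp means the kthread never got the CPU back.
 */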
/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        __touch_watchdog();

        /* kick off the timer for the hardlockup detector */
        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly each time the watchdog timer fires to reset the
         * softlockup timestamp.  If this gets delayed for more than
         * softlockup_thresh seconds then the debug-printout triggers
         * in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}
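/*
 * The thread runs SCHED_FIFO at MAX_RT_PRIO-1, the highest realtime
 * priority, so only code monopolizing the CPU with preemption or
 * interrupts disabled (or another highest-priority spinner) can keep
 * it from refreshing the timestamp, which is exactly the condition
 * the softlockup check is after.
 */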
#ifdef CONFIG_PERF_EVENTS_NMI
static int watchdog_nmi_enable(int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
        event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }

        printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
        return -1;

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}
static void watchdog_nmi_disable(int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_PERF_EVENTS_NMI */
/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        return 0;
}
static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);

        /* enable the perf event */
        if (watchdog_nmi_enable(cpu) != 0)
                return -1;

        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
                        return -1;
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

        return 0;
}
static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}
static void watchdog_enable_all_cpus(void)
{
        int cpu;
        int result = 0;

        for_each_online_cpu(cpu)
                result += watchdog_enable(cpu);

        if (result)
                printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");

        /* if any cpu succeeds, watchdog is considered enabled for the system */
        watchdog_enabled = 1;
}
static void watchdog_disable_all_cpus(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                watchdog_disable(cpu);

        /* if all watchdogs are disabled, then they are disabled for the system */
        watchdog_enabled = 0;
}
/* sysctl functions */
#ifdef CONFIG_SYSCTL

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */
int proc_dowatchdog_enabled(struct ctl_table *table, int write,
                     void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();
        return 0;
}

int proc_dowatchdog_thresh(struct ctl_table *table, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
{
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif /* CONFIG_SYSCTL */
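/*
 * Example usage, assuming the usual ctl_table wiring points the
 * nmi_watchdog sysctl at watchdog_enabled:
 *   echo 0 > /proc/sys/kernel/nmi_watchdog    # stop on all CPUs
 *   echo 1 > /proc/sys/kernel/nmi_watchdog    # start again
 * proc_dointvec() updates watchdog_enabled first; the handler then
 * tears down or rebuilds the timers, threads and perf events.
 */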
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (watchdog_prepare_cpu(hotcpu))
                        return NOTIFY_BAD;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enable(hotcpu))
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                watchdog_disable(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};
static int __init spawn_watchdog_task(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        if (no_watchdog)
                return 0;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(err == NOTIFY_BAD);

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

        return 0;
}
early_initcall(spawn_watchdog_task);