/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/kexec.h>
#include <linux/debug_locks.h>
#include <linux/random.h>
#include <linux/kallsyms.h>

int panic_on_oops;
int tainted;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;

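/* List of notifiers to run when the machine panics. */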
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

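/*
 * Parse the "panic=" boot option: the number of seconds to wait before
 * rebooting after a panic.  A value of zero or less (the default) means
 * panic() never reboots and instead stays in its blink loop.
 */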
static int __init panic_setup(char *str)
{
	panic_timeout = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("panic=", panic_setup);

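/*
 * Default panic_blink() handler: blink nothing and report that zero
 * milliseconds were spent waiting.
 */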
static long no_blink(long time)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(long time);
EXPORT_SYMBOL(panic_blink);

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
NORET_TYPE void panic(const char * fmt, ...)
{
	long i;
	static char buf[1024];
	va_list args;
#if defined(CONFIG_S390)
	unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif

	/*
	 * It's possible to come here directly from a panic-assertion and not
	 * have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 */
	preempt_disable();

	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	dump_stack();
#endif
	bust_spinlocks(0);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * Do we want to call this before we try to display a message?
	 */
	crash_kexec(NULL);

#ifdef CONFIG_SMP
	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
#endif

	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked..
		 */
		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
		for (i = 0; i < panic_timeout*1000; ) {
			touch_nmi_watchdog();
			i += panic_blink(i);
			mdelay(1);
			i++;
		}
		/* This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait(caller);
#endif
	local_irq_enable();
	for (i = 0;;) {
		touch_softlockup_watchdog();
		i += panic_blink(i);
		mdelay(1);
		i++;
	}
}

EXPORT_SYMBOL(panic);

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *
 * The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
	static char buf[20];

	if (tainted) {
		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c",
			tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
			tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
			tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
			tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
			tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
			tainted & TAINT_BAD_PAGE ? 'B' : ' ',
			tainted & TAINT_USER ? 'U' : ' ',
			tainted & TAINT_DIE ? 'D' : ' ',
			tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ',
			tainted & TAINT_WARN ? 'W' : ' ');
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

void add_taint(unsigned flag)
{
	debug_locks = 0; /* can't trust the integrity of the kernel anymore */
	tainted |= flag;
}
EXPORT_SYMBOL(add_taint);

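/*
 * Parse the "pause_on_oops=" boot option: the number of seconds for which
 * other CPUs are held off while the first oopsing CPU prints its report
 * (see do_oops_enter_exit() below).
 */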
static int __init pause_on_oops_setup(char *str)
{
	pause_on_oops = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("pause_on_oops=", pause_on_oops_setup);

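/*
 * Busy-wait for the given number of milliseconds, touching the NMI
 * watchdog each millisecond so it does not trigger while we stall.
 */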
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the
 * display, too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	debug_locks_off(); /* can't trust the integrity of the kernel anymore */
	do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

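/*
 * Initialize oops_id with random bytes if it has not been set yet.  Run as
 * a late initcall, and also called from print_oops_end_marker() in case a
 * report is printed before the initcalls have completed.
 */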
static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));

	return 0;
}
late_initcall(init_oops_id);

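/*
 * Print the "---[ end trace ]---" line, tagged with the random oops_id,
 * to mark the end of an oops or warning report.
 */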
static void print_oops_end_marker(void)
{
	init_oops_id();
	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
		(unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
}

#ifdef WANT_WARN_ON_SLOWPATH
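/*
 * Slow path of WARN_ON(): print the "cut here" banner, the warning
 * location and caller, the loaded modules and a stack dump, then mark the
 * kernel with TAINT_WARN.
 */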
void warn_on_slowpath(const char *file, int line)
{
	char function[KSYM_SYMBOL_LEN];
	unsigned long caller = (unsigned long) __builtin_return_address(0);
	sprint_symbol(function, caller);

	printk(KERN_WARNING "------------[ cut here ]------------\n");
	printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
		line, function);
	print_modules();
	dump_stack();
	print_oops_end_marker();
	add_taint(TAINT_WARN);
}
EXPORT_SYMBOL(warn_on_slowpath);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

static unsigned long __stack_check_testing;

/*
 * Self test function for the stack-protector feature.
 * This test requires that the local variable absolutely has
 * a stack slot, hence the barrier()s.
 */
static noinline void __stack_chk_test_func(void)
{
	unsigned long foo;
	barrier();
	/*
	 * We need to make sure we're not about to clobber the return
	 * address; while real exploits do this, it's unhealthy on a running
	 * system.  Besides, if we did, the test would already have failed,
	 * so it's time to pull the emergency brake on it.
	 */
	if ((unsigned long)__builtin_return_address(0) ==
			*(((unsigned long *)&foo)+1)) {
		printk(KERN_ERR "No -fstack-protector-stack-frame!\n");
		return;
	}
#ifdef CONFIG_FRAME_POINTER
	/* We also don't want to clobber the frame pointer */
	if ((unsigned long)__builtin_return_address(0) ==
			*(((unsigned long *)&foo)+2)) {
		printk(KERN_ERR "No -fstack-protector-stack-frame!\n");
		return;
	}
#endif
	barrier();
	if (current->stack_canary == *(((unsigned long *)&foo)+1))
		*(((unsigned long *)&foo)+1) = 0;
	else
		printk(KERN_ERR "No -fstack-protector canary found\n");
	barrier();
}

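/*
 * Run the self test above.  While __stack_check_testing points at the test
 * function, __stack_chk_fail() treats the resulting canary mismatch as
 * expected and clears the flag instead of panicking; if the flag is still
 * set afterwards, the stack protector never fired and the test has failed.
 */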
static int __stack_chk_test(void)
{
	printk(KERN_INFO "Testing -fstack-protector-all feature\n");
	__stack_check_testing = (unsigned long)&__stack_chk_test_func;
	__stack_chk_test_func();
	if (__stack_check_testing) {
		printk(KERN_ERR "-fstack-protector-all test failed\n");
		WARN_ON(1);
	}
	return 0;
}
/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
void __stack_chk_fail(void)
{
	if (__stack_check_testing == (unsigned long)&__stack_chk_test_func) {
		long delta;

		delta = (unsigned long)__builtin_return_address(0) -
				__stack_check_testing;
		/*
		 * The test needs to happen inside the test function, so
		 * check if the return address is close to that function.
		 * The function is only 2 dozen bytes long, but keep a wide
		 * safety margin to avoid panic()s for normal users regardless
		 * of the quality of the compiler.
		 */
		if (delta >= 0 && delta <= 400) {
			__stack_check_testing = 0;
			return;
		}
	}
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

late_initcall(__stack_chk_test);
#endif