/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)
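/*
 * With CONFIG_DYNAMIC_FTRACE the compiler-generated mcount call sites are
 * patched to NOPs at boot and only rewritten to call ftrace_caller while
 * tracing is actually enabled, so mcount itself can be a plain retq.
 */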

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

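	/*
	 * %rdi = address of the mcount call site inside the traced function
	 * (the return address at 0x38(%rsp) backed up by MCOUNT_INSN_SIZE,
	 * the length of the call instruction); %rsi = the traced function's
	 * parent return address, from 8(%rbp). These are the two arguments
	 * the tracer callback patched in at ftrace_call below receives.
	 */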
.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

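	/*
	 * ftrace_call above and ftrace_graph_call below are patch sites:
	 * the ftrace_stub placeholders are rewritten at runtime to call
	 * the active (graph) tracer.
	 */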
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

.globl ftrace_stub
ftrace_stub:
	retq

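	/*
	 * Without dynamic patching every traced function pays for the call
	 * to mcount; the compares above make the no-tracer case bail out
	 * through ftrace_stub as quickly as possible.
	 */
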
trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	leaq 8(%rbp), %rdi
	movq 0x38(%rsp), %rsi
	subq $MCOUNT_INSN_SIZE, %rsi

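	/*
	 * prepare_ftrace_return(&parent_ret_addr, self_addr): %rdi points
	 * at the slot on the traced function's stack that holds its return
	 * address (so it can be redirected to return_to_handler), and %rsi
	 * is the call-site address computed above.
	 */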
	call prepare_ftrace_return

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp
	retq
END(ftrace_graph_caller)


.globl return_to_handler
return_to_handler:
	subq $80, %rsp

	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)
	movq %r10, 56(%rsp)
	movq %r11, 64(%rsp)

	call ftrace_return_to_handler

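	/*
	 * ftrace_return_to_handler() hands back the original return address
	 * in %rax; park it at 72(%rsp) so that after the register restore
	 * and the addq below it sits at the top of the stack and becomes
	 * the target of the final retq.
	 */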
	movq %rax, 72(%rsp)
	movq 64(%rsp), %r11
	movq 56(%rsp), %r10
	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $72, %rsp
	retq
#endif


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm

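/*
 * FAKE_STACK_FRAME builds a synthetic interrupt frame (ss, rsp, eflags,
 * cs, rip plus an orig_rax slot) so that kernel_thread() and
 * kernel_execve() below can return through the normal syscall exit paths.
 */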
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	$__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
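	/*
	 * Fast-path dispatch below: %rax indexes the 8-byte entries of
	 * sys_call_table. %r10 is copied into %rcx because SYSCALL clobbers
	 * %rcx with the user return address, so userspace passes arg3 in
	 * %r10 while the C ABI expects it in %rcx.
	 */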
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp  retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

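/*
 * Each PTREGSCALL stub loads the real handler into %rax and points the
 * given argument register at the full pt_regs frame, then shares
 * ptregscall_common, which saves the remaining registers and fixes up
 * the top of stack around the actual call.
 */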
	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
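/*
 * Note the return-address dance above: the stub's return address is
 * popped into %r11 and parked in callee-saved %r15 across the handler
 * call, then pushed back so the final ret rejoins the syscall path.
 */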

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME	_frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME	_frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	/*
	 * Save rbp twice: One is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	movq %rbp, -8(%rsp)
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt   $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm
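	/*
	 * The vector is pushed complemented: the value saved in the
	 * orig_rax slot is then negative, so it cannot be mistaken for a
	 * valid syscall number, and the handler simply complements it
	 * again to recover the vector.
	 */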

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

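	/*
	 * zeroentry pushes a zero placeholder error code for exceptions
	 * that don't supply one; errorentry relies on the error code the
	 * CPU has already pushed. Both stash the handler in %rax and
	 * share error_entry below.
	 */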
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
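	/*
	 * The rdmsr sign test above checks the high half of MSR_GS_BASE:
	 * a negative value is a kernel address, meaning we interrupted the
	 * kernel with kernel GS already live, so no swapgs is done and
	 * %ebx stays 1; otherwise we swapgs and clear %ebx so the exit
	 * path knows to swap back. The EXCEPTION_STKSZ adjustment of the
	 * TSS IST pointer lets the same exception nest without clobbering
	 * the current frame.
	 */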

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
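	/*
	 * pda_irqcount is -1 while off the interrupt stack, so the incl
	 * above sets ZF exactly when the count reaches 0; the cmove then
	 * switches %rsp to the interrupt stack only on the outermost entry.
	 */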
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	CFI_STARTPROC
/* Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp            # we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(do_hypervisor_callback)

/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */