/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"

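/*
 * Register aliases used below come from entry-header.S: scno (r7) is
 * the syscall number, tbl (r8) the syscall table pointer, why (r8)
 * flags a real syscall, and tsk (r9) points at the current
 * thread_info.
 */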
	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With that frame in place the caller's lr ends up at [fp, #-4], which is
 * where the mcount implementations below fetch it from.
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 */
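/*
 * Worked example: if foo() calls an instrumented bar(), then on entry
 * to the mcount routine lr points just past the mcount call near the
 * start of bar().  The implementations below therefore compute
 * r0 = lr - MCOUNT_INSN_SIZE (the call site in bar()) and load r1 with
 * bar()'s saved lr (the return address into foo()) before invoking the
 * tracer, roughly as tracer(r0, r1).
 */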
#ifdef CONFIG_DYNAMIC_FTRACE
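/*
 * With dynamic ftrace the "bl ftrace_stub" instructions at the
 * mcount_call and ftrace_call sites below are rewritten at runtime to
 * call the active tracer; the labels are made global so the patching
 * code can locate them.
 */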
ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb	sp!, {r0-r3, lr}
	ldr	r1, [fp, #-4]
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#else

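/*
 * Entry conditions for __gnu_mcount_nc, given the call site shown in
 * the comment above: [sp] holds the instrumented function's original
 * lr (pushed by "push {lr}"), while lr is the return address back into
 * the instrumented function.  After the "stmdb sp!, {r0-r3, lr}" below,
 * the original lr sits at [sp, #20].  The exit sequence
 * "ldmia sp!, {r0-r3, ip, lr}" restores the original lr and returns
 * via ip, which the ARM calling convention lets us clobber.
 */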
ENTRY(__gnu_mcount_nc)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	gnu_trace
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip

gnu_trace:
	ldr	r1, [sp, #20]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip

ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	trace
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

trace:
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov	pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	/*
	 * We get here (via the A710 checks below) when the instruction
	 * read back from user space does not look like a SWI: assume the
	 * ARM710 erratum corrupted it, restore the calling registers and
	 * retry the instruction (lr points just past the SWI).
	 */
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
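	/*
	 * For example, an EABI binary invokes sys_write as
	 * "mov r7, #__NR_write; swi #0", leaving the swi immediate zero,
	 * whereas an OABI binary uses
	 * "swi #(__NR_OABI_SYSCALL_BASE + __NR_write)", encoding the
	 * syscall number in the instruction itself.
	 */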
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
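	/*
	 * Worked example: an OABI "swi #0x900004" (sys_write) leaves
	 * r10 = 0x00900004 after the bics, so Z is clear and the eor with
	 * __NR_OABI_SYSCALL_BASE (0x900000) yields scno = 4; an EABI
	 * "swi #0" leaves r10 = 0 and both conditional instructions are
	 * skipped.
	 */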
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
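	/*
	 * Out-of-range syscall numbers end up here: anything at or above
	 * __ARM_NR_BASE (__NR_SYSCALL_BASE + 0x0f0000), such as the
	 * private __ARM_NR_cacheflush call, is handed to arm_syscall();
	 * everything else falls through to sys_ni_syscall().
	 */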
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
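/*
 * sys_syscall is the indirect syscall(2) entry point: r0 carries the
 * number of the call to make and r1-r4 its first four arguments, so
 * the arguments are shifted down one register (with r5/r6 spilled to
 * the stack) before dispatching through the table.  For example,
 * syscall(__NR_close, fd) arrives with r0 = __NR_close and r1 = fd and
 * is dispatched as sys_close(fd).
 */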
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

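/*
 * User space passes sizeof(struct statfs64) in r1.  Under EABI
 * alignment rules the user structure is padded to 88 bytes while the
 * kernel's is 84, so fix the size up here to keep the sanity check in
 * sys_statfs64() happy.
 */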
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not aligned to a whole page, we
 * return -EINVAL.
 */
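/*
 * For example, with 16K pages (PAGE_SHIFT == 14) PGOFF_MASK is expected
 * to cover the low two bits of r5, so only 4K offsets that are a
 * multiple of four (i.e. 16K-aligned) are accepted, and r5 is shifted
 * right by two to convert it into units of whole pages for
 * sys_mmap_pgoff().
 */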
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences: EABI aligns
 * 64-bit arguments to even/odd register pairs, OABI does not, so the
 * registers must be shuffled before calling the EABI implementation.
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif