/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX System V Release 4 Programmer's Guide: ANSI C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, int, int);

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
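
/*
 * These operate at ELF load-segment granularity: ELF_MIN_ALIGN may be
 * larger than the hardware page size, so segments are rounded to ELF
 * pages rather than CPU pages.
 */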
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
	.hasvdso	= 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

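/*
 * Grow the brk area: ELF-page-align both ends and map anonymous zeroed
 * pages over any gap, then record the new start_brk/brk.
 */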
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

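/*
 * Build the new process's initial stack image: the platform string, the
 * ELF auxiliary vector, argc, and the argv/envp pointer arrays, laid out
 * as the ELF ABI expects them above the argument strings.
 */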
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

#ifndef elf_map

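/*
 * mmap() the file-backed portion of a single program header, taking care
 * of the ELF page rounding of both the virtual address and file offset.
 */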
static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}

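/*
 * Load an a.out-format program interpreter for an ELF executable.
 * Returns the interpreter's entry point, or ~0UL on failure.
 */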
static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

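/*
 * Shift the stack top by a random, page-aligned amount bounded by
 * STACK_RND_MASK, when randomization is enabled for this task.
 */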
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
		!(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

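/*
 * The main ELF loader: validate the headers, map each PT_LOAD segment,
 * bring in the program interpreter (ELF or a.out) if one is requested,
 * build the stack and auxiliary vector, and transfer control to the
 * new image's entry point.
 */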
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		static int warn;
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
			printk(KERN_WARNING "a.out ELF interpreter %s is "
				"deprecated and will not be supported "
				"after Linux 2.6.25\n", elf_interpreter);
			warn++;
		}

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
	if (current->flags & PF_RANDOMIZE)
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files)
		reset_files_struct(current, files);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

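/*
 * Skip 'off' bytes in the core file: seek if the file supports it,
 * otherwise write zero-filled pages to keep the offsets consistent.
 */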
static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;
			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				free_page((unsigned long)buf);
				return 0;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		goto whole;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		if (get_user(word, header) == 0 && word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

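/* On-file size of a note: header plus name and payload, each padded to 4 bytes. */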
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

#define DUMP_WRITE(addr, nr, foffset) \
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;

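/* Fill in the fixed fields of an ELF core-file header for 'segs' program headers. */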
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_ppid = task_pid_vnr(p->real_parent);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_ppid = task_pid_vnr(p->real_parent);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

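/*
 * Begin a vma walk that also visits the gate vma: return the first real
 * vma if one exists, otherwise the gate vma itself.
 */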
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}
/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef	ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	unsigned long mm_flags;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

1580 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1581 if (!elf)
1582 goto cleanup;
1583 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1584 if (!prstatus)
1585 goto cleanup;
1586 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1587 if (!psinfo)
1588 goto cleanup;
1589 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1590 if (!notes)
1591 goto cleanup;
1592 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1593 if (!fpu)
1594 goto cleanup;
1595#ifdef ELF_CORE_COPY_XFPREGS
1596 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1597 if (!xfpu)
1598 goto cleanup;
1599#endif
1600
1601 if (signr) {
1602 struct elf_thread_status *tmp;
486ccb05 1603 rcu_read_lock();
1da177e4
LT
1604 do_each_thread(g,p)
1605 if (current->mm == p->mm && current != p) {
11b0b5ab 1606 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
1da177e4 1607 if (!tmp) {
486ccb05 1608 rcu_read_unlock();
1da177e4
LT
1609 goto cleanup;
1610 }
1da177e4
LT
1611 tmp->thread = p;
1612 list_add(&tmp->list, &thread_list);
1613 }
1614 while_each_thread(g,p);
486ccb05 1615 rcu_read_unlock();
1da177e4
LT
1616 list_for_each(t, &thread_list) {
1617 struct elf_thread_status *tmp;
1618 int sz;
1619
1620 tmp = list_entry(t, struct elf_thread_status, list);
1621 sz = elf_dump_thread_status(signr, tmp);
1622 thread_status_size += sz;
1623 }
1624 }
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	gate_vma = get_gate_vma(current);
	if (gate_vma != NULL)
		segs++;

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
	     elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		sz += elf_coredump_extra_notes_size();

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/*
	 * We must use the same mm->flags while dumping core to avoid
	 * inconsistency between the program headers and bodies, otherwise an
	 * unusable core file can be generated.
	 */
	mm_flags = current->mm->flags;

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_dump_size(vma, mm_flags);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file, &foffset))
			goto end_coredump;

	if (elf_coredump_extra_notes_write(file, &foffset))
		goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, &foffset))
				goto end_coredump;
	}

	/* Align to page */
	DUMP_SEEK(dataoff - foffset);

	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_dump_size(vma, mm_flags);

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(0)) {
					if (!dump_seek(file, PAGE_SIZE)) {
						page_cache_release(page);
						goto end_coredump;
					}
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef	ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");