/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
				int, int, unsigned long);

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
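/*
 * Worked example (assuming ELF_MIN_ALIGN == 4096):
 *	ELF_PAGESTART(0x12345)  == 0x12000   round down to the page start
 *	ELF_PAGEOFFSET(0x12345) == 0x345     offset within the page
 *	ELF_PAGEALIGN(0x12345)  == 0x13000   round up to the next boundary
 */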

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
	.hasvdso	= 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/*
 * We need to explicitly zero any fractional pages
 * after the data section (i.e. bss).  This would
 * contain the junk from the file that should not
 * be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
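/*
 * Example: with ELF_MIN_ALIGN == 4096 and elf_bss == 0x0804a123,
 * padzero() clears the 0xedd bytes from 0x0804a123 up to the page
 * boundary at 0x0804b000, so stale file contents mapped into the tail
 * of the last data page cannot leak into the bss.
 */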

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
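/*
 * On the common grows-down configuration, STACK_ALLOC moves sp down by
 * len bytes and yields the new (lower) address, STACK_ADD reserves room
 * for 'items' pointer-sized slots below sp, and STACK_ROUND additionally
 * aligns the result down to a 16-byte boundary, as the ABI expects.
 */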

static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
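/*
 * For the usual ELF case the initial userspace stack built above looks
 * like this (lowest address first, sp pointing at argc when the new
 * program starts):
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... , NULL
 *	auxv pairs (AT_* id, value), terminated by an AT_NULL pair
 *	argument/environment strings and the platform string (higher up)
 */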

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	down_write(&current->mm->mmap_sem);
	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);
	return map_addr;
}

#endif /* !elf_map */

static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
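/*
 * Example: an image with two PT_LOAD segments, the first at p_vaddr
 * 0x0000 with p_memsz 0x1000 and the second at p_vaddr 0x5000 with
 * p_memsz 0x800, has a total mapping size of 0x5800 bytes: the span
 * from the page start of the first segment to the end of the last one,
 * including the hole between them.
 */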


/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	error = load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
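/*
 * Note that on success the return value is the interpreter's load bias
 * (the relocation applied to every p_vaddr), not the entry point; the
 * caller adds e_entry itself and also publishes the bias as AT_BASE.
 */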

static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
		!(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
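/*
 * With the default mask and 4KB pages (PAGE_SHIFT == 12), the random
 * offset is at most 0x7ff pages, i.e. the stack top moves within an
 * 8MB window below (or above, for grows-up stacks) STACK_TOP.
 */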

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		static int warn;
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
			printk(KERN_WARNING "a.out ELF interpreter %s is "
				"deprecated and will not be supported "
				"after Linux 2.6.25\n", elf_interpreter);
			warn++;
		}

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely(elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
#ifdef CONFIG_X86
			load_bias = 0;
#else
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
		}
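		/*
		 * On x86 a zero load_bias means the first PT_LOAD of a PIE
		 * (ET_DYN) binary is mapped wherever do_mmap() chooses, so
		 * with PF_RANDOMIZE the executable itself gets a randomized
		 * base; other architectures fall back to the fixed
		 * ELF_ET_DYN_BASE offset.
		 */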

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT) {
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		} else {
			unsigned long uninitialized_var(interp_map_addr);

			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_map_addr,
						    load_bias);
			if (!IS_ERR((void *)elf_entry)) {
				/*
				 * load_elf_interp() returns relocation
				 * adjustment
				 */
				interp_load_addr = elf_entry;
				elf_entry += loc->interp_elf_ex.e_entry;
			}
		}
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
	if (current->flags & PF_RANDOMIZE)
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files)
		reset_files_struct(current, files);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;
			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				free_page((unsigned long)buf);
				return 0;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}
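/*
 * For files that cannot seek (e.g. a core dump piped to a helper), the
 * fallback above emits real zero bytes instead of skipping, so the
 * resulting core file layout is identical either way.
 */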

/*
 * Decide how much of a segment to dump: all of it, part of it, or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		goto whole;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		if (get_user(word, header) == 0 && word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
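/*
 * The MMF_DUMP_* filter bits examined here are the ones userspace
 * toggles through /proc/<pid>/coredump_filter, so each mapping type
 * (anonymous/file-backed, private/shared) can be included in or
 * excluded from the dump per process.
 */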

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
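/*
 * Example: a note named "CORE" with a hypothetical 144-byte payload
 * occupies 12 bytes of elf_note header + 8 bytes for the name ("CORE"
 * plus NUL, padded to a multiple of 4) + 144 bytes of data = 164 bytes
 * in the note segment.
 */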

#define DUMP_WRITE(addr, nr, foffset)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;

static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
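/*
 * A core file therefore carries no section table at all: it is
 * described entirely by program headers, one PT_NOTE entry followed by
 * one PT_LOAD entry per dumped vma (plus any ELF_CORE_EXTRA_PHDRS an
 * architecture adds).
 */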

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_ppid = task_pid_vnr(p->real_parent);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_ppid = task_pid_vnr(p->real_parent);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the
 * caller will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef	ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	unsigned long mm_flags;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef	ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	if (signr) {
		struct elf_thread_status *tmp;
		rcu_read_lock();
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					rcu_read_unlock();
					goto cleanup;
				}
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		rcu_read_unlock();
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	gate_vma = get_gate_vma(current);
	if (gate_vma != NULL)
		segs++;

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
			elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs + 1) * sizeof(struct elf_phdr);	/* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		sz += elf_coredump_extra_notes_size();

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
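	/*
	 * From here on, 'offset' is the planned position of each segment's
	 * data (already counted into the program headers above), while
	 * 'foffset' tracks how far the notes actually written have advanced
	 * the file; the DUMP_SEEK(dataoff - foffset) below reconciles the two.
	 */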

	/*
	 * We must use the same mm->flags while dumping core to avoid
	 * inconsistency between the program headers and bodies, otherwise an
	 * unusable core file can be generated.
	 */
	mm_flags = current->mm->flags;

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_dump_size(vma, mm_flags);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file, &foffset))
			goto end_coredump;

	if (elf_coredump_extra_notes_write(file, &foffset))
		goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, &foffset))
				goto end_coredump;
	}

	/* Align to page */
	DUMP_SEEK(dataoff - foffset);

	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_dump_size(vma, mm_flags);

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(0)) {
					if (!dump_seek(file, PAGE_SIZE)) {
						page_cache_release(page);
						goto end_coredump;
					}
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");