/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
				int, int, unsigned long);

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
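
/*
 * Example (assuming ELF_MIN_ALIGN == 4096):
 *   ELF_PAGESTART(0x12345)  == 0x12000  (round down to the page start)
 *   ELF_PAGEOFFSET(0x12345) == 0x345    (offset within the page)
 *   ELF_PAGEALIGN(0x12345)  == 0x13000  (round up to the next page)
 */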

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
	.hasvdso	= 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
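
/*
 * Example: with ELF_MIN_ALIGN == 4096 and elf_bss == 0x0804a123, padzero()
 * clears the 0xedd bytes from 0x0804a123 up to the 0x0804b000 boundary.
 */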

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif
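
/*
 * create_elf_tables() lays out the initial user stack.  From the lowest
 * used address upward it stores: argc, the argv[] pointer array
 * (NULL-terminated), the envp[] pointer array (NULL-terminated), and the
 * ELF auxiliary vector (id/value pairs ending with AT_NULL); the strings
 * those pointers reference sit higher up, near the stack top.
 */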
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;
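
	/*
	 * (The AT_RANDOM entry added below points at these bytes; userspace
	 * C libraries commonly read them to seed stack-protector canaries
	 * and similar.)
	 */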
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, cred->uid);
	NEW_AUX_ENT(AT_EUID, cred->euid);
	NEW_AUX_ENT(AT_GID, cred->gid);
	NEW_AUX_ENT(AT_EGID, cred->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	argv = sp;
	envp = argv + argc + 1;

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	down_write(&current->mm->mmap_sem);
	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */

static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
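
/*
 * Example: given PT_LOAD segments at p_vaddr 0x400000 (p_memsz 0x1000)
 * and p_vaddr 0x600000 (p_memsz 0x2000), total_mapping_size() returns
 * 0x602000 - 0x400000 = 0x202000.
 */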

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	error = load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
		!(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
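
/*
 * Example: with 4K pages STACK_RND_MASK is 0x7ff, so up to 0x7ff pages
 * (just under 8MB of virtual address space) are subtracted from (or, with
 * CONFIG_STACK_GROWSUP, added to) the page-aligned stack top.
 */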

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_ph;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex);
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
#ifdef CONFIG_X86
			load_bias = 0;
#else
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
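
	/*
	 * At this point load_bias is the displacement added to every p_vaddr
	 * in the image: zero for ET_EXEC, and for ET_DYN the difference
	 * between where the first PT_LOAD actually landed and where it asked
	 * to be loaded.
	 */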
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		unsigned long uninitialized_var(interp_map_addr);

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	install_exec_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;
			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				/* don't leak the zero page on write failure */
				free_page((unsigned long)buf);
				return 0;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		goto whole;

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		if (get_user(word, header) == 0 && word == magic.cmp)
			return PAGE_SIZE;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
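
/*
 * The MMF_DUMP_* bits tested by FILTER() above are the ones userspace
 * controls through /proc/<pid>/coredump_filter.
 */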

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
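
/*
 * Example: for a note named "CORE" the 5-byte name (including its NUL)
 * pads to 8 bytes, so the note occupies
 * sizeof(struct elf_note) + 8 + roundup(en->datasz, 4) bytes on disk.
 */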

#define DUMP_WRITE(addr, nr, foffset)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;

static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags, u8 osabi)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;

	return;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_ppid = task_pid_vnr(p->real_parent);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_ppid = task_pid_vnr(p->real_parent);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, cred->uid);
	SET_GID(psinfo->pr_gid, cred->gid);
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
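
/*
 * fill_auxv_note() above reuses the auxiliary vector that
 * create_elf_tables() saved in mm->saved_auxv at exec time, emitting it
 * verbatim as the NT_AUXV core note.
 */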

#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>

struct elf_thread_core_info {
	struct elf_thread_core_info *next;
	struct task_struct *task;
	struct elf_prstatus prstatus;
	struct memelfnote notes[0];
};

struct elf_note_info {
	struct elf_thread_core_info *thread;
	struct memelfnote psinfo;
	struct memelfnote auxv;
	size_t size;
	int thread_notes;
};

/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory. On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
				       const struct user_regset *regset)
{
	if (regset->writeback)
		regset->writeback(task, regset, 1);
}

static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0],
				    0, sizeof(t->prstatus.pr_reg),
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  sizeof(t->prstatus), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type &&
		    (!regset->active || regset->active(t->task, regset))) {
			int ret;
			size_t size = regset->n * regset->size;
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					t->prstatus.pr_fpvalid = 1;
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}

static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  long signr, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	if (psinfo == NULL)
		return 0;

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags, view->ei_osabi);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, signr, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	return 1;
}

static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}

/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct file *file, loff_t *foffset)
{
	bool first = 1;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], file, foffset))
			return 0;

		if (first && !writenote(&info->psinfo, file, foffset))
			return 0;
		if (first && !writenote(&info->auxv, file, foffset))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], file, foffset))
				return 0;

		first = 0;
		t = t->next;
	} while (t);

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
}

#else

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

struct elf_note_info {
	struct memelfnote *notes;
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;
	elf_fpregset_t *fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;
#endif
	int thread_status_size;
	int numnote;
};

static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  long signr, struct pt_regs *regs)
{
#define	NUM_NOTES	6
	struct list_head *t;

	info->notes = NULL;
	info->prstatus = NULL;
	info->psinfo = NULL;
	info->fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = NULL;
#endif
	INIT_LIST_HEAD(&info->thread_list);

	info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote),
			      GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif

	info->thread_status_size = 0;
	if (signr) {
		struct core_thread *ct;
		struct elf_thread_status *ets;

		for (ct = current->mm->core_state->dumper.next;
						ct; ct = ct->next) {
			ets = kzalloc(sizeof(*ets), GFP_KERNEL);
			if (!ets)
				return 0;

			ets->thread = ct->task;
			list_add(&ets->list, &info->thread_list);
		}

		list_for_each(t, &info->thread_list) {
			int sz;

			ets = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, ets);
			info->thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, signr);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS, ELF_OSABI);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	info->numnote = 2;

	fill_auxv_note(&info->notes[info->numnote++], current->mm);

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;

#undef NUM_NOTES
}
1819
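/*
 * The note data laid out by fill_note_info() and emitted by
 * write_note_info() below comes out in this order:
 *
 *	CORE	NT_PRSTATUS	(dumping task)
 *	CORE	NT_PRPSINFO
 *	CORE	NT_AUXV
 *	CORE	NT_PRFPREG	(only if pr_fpvalid was set)
 *	LINUX	ELF_CORE_XFPREG_TYPE	(only with ELF_CORE_COPY_XFPREGS)
 *	then NT_PRSTATUS/NT_PRFPREG sets for every other thread
 */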
static size_t get_note_info_size(struct elf_note_info *info)
{
	int sz = 0;
	int i;

	for (i = 0; i < info->numnote; i++)
		sz += notesize(info->notes + i);

	sz += info->thread_status_size;

	return sz;
}

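/*
 * Write out the collected notes, then the per-thread status notes.
 * Returns 1 on success, 0 if any writenote() fails.
 */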
static int write_note_info(struct elf_note_info *info,
			   struct file *file, loff_t *foffset)
{
	int i;
	struct list_head *t;

	for (i = 0; i < info->numnote; i++)
		if (!writenote(info->notes + i, file, foffset))
			return 0;

	/* write out the thread status notes section */
	list_for_each(t, &info->thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, foffset))
				return 0;
	}

	return 1;
}

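/*
 * Release everything fill_note_info() allocated.  This is safe on a
 * partially initialized elf_note_info: the pointers start out NULL and
 * kfree(NULL) is a no-op.
 */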
static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}

#endif

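/*
 * The first vma to dump: the task's own mappings when it has any,
 * otherwise the gate vma (e.g. the vsyscall page), which is not on the
 * mm's vma list.
 */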
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the
 * caller will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of all the bits,
 * and then they are actually written out.  If we run over the core limit
 * we just truncate.
 */
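/*
 * The finished core file is laid out as follows (offsets are computed
 * in the first pass, the data is written in the second):
 *
 *	ELF header
 *	note program header + one PT_LOAD program header per vma
 *	note data
 *	padding up to an ELF_EXEC_PAGESIZE boundary (dataoff)
 *	PT_LOAD segment contents, dumped a page at a time
 *
 * Architectures may append extra headers and data through the
 * ELF_CORE_WRITE_EXTRA_* hooks.
 */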
static int elf_core_dump(long signr, struct pt_regs *regs,
			 struct file *file, unsigned long limit)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	unsigned long mm_flags;
	struct elf_note_info info;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto out;

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

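	/*
	 * The gate vma (e.g. the vsyscall page) is not counted in
	 * map_count, so it needs a program header slot of its own.
	 */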
	gate_vma = get_gate_vma(current);
	if (gate_vma != NULL)
		segs++;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(elf, segs + 1, /* including notes section */
			    &info, signr, regs))
		goto cleanup;

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs + 1) * sizeof(struct elf_phdr);	/* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

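	/*
	 * offset now accounts for everything up to the page-aligned start
	 * of the segment data (dataoff); foffset follows the writes as
	 * they actually happen, and the difference is padded out with
	 * DUMP_SEEK before the segment contents are written.
	 */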
	/*
	 * We must use the same mm->flags while dumping core to avoid
	 * inconsistency between the program headers and bodies, otherwise an
	 * unusable core file can be generated.
	 */
	mm_flags = current->mm->flags;

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_dump_size(vma, mm_flags);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	if (!write_note_info(&info, file, &foffset))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(file, &foffset))
		goto end_coredump;

	/* Align to page */
	DUMP_SEEK(dataoff - foffset);

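	/*
	 * Second pass: dump the segment contents page by page.  Pages
	 * that cannot be brought in become holes via DUMP_SEEK, and the
	 * shared zero page is skipped rather than copied out.
	 */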
	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_dump_size(vma, mm_flags);

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *tmp_vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
					   &page, &tmp_vma) <= 0) {
				DUMP_SEEK(PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(0)) {
					if (!dump_seek(file, PAGE_SIZE)) {
						page_cache_release(page);
						goto end_coredump;
					}
				} else {
					void *kaddr;
					flush_cache_page(tmp_vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(elf);
out:
	return has_dumped;
}

#endif		/* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");