#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
.load_binary = load_elf_binary,
.load_shlib = load_elf_library,
.core_dump = elf_core_dump,
- .min_coredump = ELF_EXEC_PAGESIZE
+ .min_coredump = ELF_EXEC_PAGESIZE,
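+ /* note for readers: flags that this binfmt supplies a vDSO mapping */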
+ .hasvdso = 1
};
#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
elf_addr_t *elf_info;
int ei_index = 0;
struct task_struct *tsk = current;
+ struct vm_area_struct *vma;
/*
* If this architecture has a platform capability string, copy it
sp = (elf_addr_t __user *)bprm->p;
#endif
+
+ /*
+ * Grow the stack manually; some architectures have a limit on how
+ * far ahead a user-space access may be in order to grow the stack.
+ */
+ vma = find_extend_vma(current->mm, bprm->p);
+ if (!vma)
+ return -EFAULT;
+
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
if (__put_user(argc, sp++))
return -EFAULT;
size_t len;
if (__put_user((elf_addr_t)p, argv++))
return -EFAULT;
- len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
- if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+ len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
return 0;
p += len;
}
size_t len;
if (__put_user((elf_addr_t)p, envp++))
return -EFAULT;
- len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
- if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
+ len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
return 0;
p += len;
}
#define INTERPRETER_ELF 2
#ifndef STACK_RND_MASK
-#define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
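+/* the mask counts pages, so scale it down for larger pages to keep 8MB of range */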
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
#endif
static unsigned long randomize_stack_top(unsigned long stack_top)
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
+
+ /*
+ * If the binary is not readable then enforce
+ * mm->dumpable = 0 regardless of the interpreter's
+ * permissions.
+ */
+ if (file_permission(interpreter, MAY_READ) < 0)
+ bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
+
retval = kernel_read(interpreter, 0, bprm->buf,
BINPRM_BUF_SIZE);
if (retval != BINPRM_BUF_SIZE) {
}
/* OK, This is the point of no return */
- current->mm->start_data = 0;
- current->mm->end_data = 0;
- current->mm->end_code = 0;
- current->mm->mmap = NULL;
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
elf_prot, elf_flags);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
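+ /* elf_map() may return an errno packed into the address; propagate it */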
+ retval = IS_ERR((void *)error) ?
+ PTR_ERR((void *)error) : -EINVAL;
goto out_free_dentry;
}
TASK_SIZE - elf_ppnt->p_memsz < k) {
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
+ retval = -EINVAL;
goto out_free_dentry;
}
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
- create_elf_tables(bprm, &loc->elf_ex,
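+ /* the user-stack writes in create_elf_tables() can fault, so check the result */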
+ retval = create_elf_tables(bprm, &loc->elf_ex,
(interpreter_type == INTERPRETER_AOUT),
load_addr, interp_load_addr);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+ goto out;
+ }
/* N.B. passed_fileno might not be initialized? */
if (interpreter_type == INTERPRETER_AOUT)
current->mm->arg_start += strlen(passed_fileno) + 1;
*
* I think we should skip something. But I am not sure how. H.J.
*/
-static int maydump(struct vm_area_struct *vma)
+static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
{
+ /* The vma can be set up to tell us the answer directly. */
+ if (vma->vm_flags & VM_ALWAYSDUMP)
+ return 1;
+
/* Do not dump I/O mapped devices or special mappings */
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
- /* Dump shared memory only if mapped from an anonymous file. */
- if (vma->vm_flags & VM_SHARED)
- return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0;
+ /* By default, dump shared memory if mapped from an anonymous file. */
+ if (vma->vm_flags & VM_SHARED) {
+ if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0)
+ return test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
+ else
+ return test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
+ }
- /* If it hasn't been written to, don't write it out */
+ /* By default, if it hasn't been written to, don't write it out. */
if (!vma->anon_vma)
- return 0;
+ return test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
- return 1;
+ return test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
}
/* An ELF note in memory */
return sz;
}
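+/*
+ * Pick the first vma to dump: the process's own first mapping, or the
+ * gate vma when there are no mappings at all.
+ */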
+static struct vm_area_struct *first_vma(struct task_struct *tsk,
+ struct vm_area_struct *gate_vma)
+{
+ struct vm_area_struct *ret = tsk->mm->mmap;
+
+ if (ret)
+ return ret;
+ return gate_vma;
+}
+/*
+ * Helper function for iterating across a vma list. It ensures that the caller
+ * will visit `gate_vma' prior to terminating the search.
+ */
+static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
+ struct vm_area_struct *gate_vma)
+{
+ struct vm_area_struct *ret;
+
+ ret = this_vma->vm_next;
+ if (ret)
+ return ret;
+ if (this_vma == gate_vma)
+ return NULL;
+ return gate_vma;
+}
+
/*
* Actual dumper
*
int segs;
size_t size = 0;
int i;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *gate_vma;
struct elfhdr *elf = NULL;
loff_t offset = 0, dataoff, foffset;
unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
#endif
int thread_status_size = 0;
elf_addr_t *auxv;
+ unsigned long mm_flags;
/*
* We no longer stop all VM operations.
segs += ELF_CORE_EXTRA_PHDRS;
#endif
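+ /* the gate vma (e.g. the vsyscall page) gets its own program header */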
+ gate_vma = get_gate_vma(current);
+ if (gate_vma != NULL)
+ segs++;
+
/* Set up header */
fill_elf_header(elf, segs + 1); /* including notes section */
sz += thread_status_size;
-#ifdef ELF_CORE_WRITE_EXTRA_NOTES
- sz += ELF_CORE_EXTRA_NOTES_SIZE;
-#endif
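+ /* arch hook; returns 0 when the architecture adds no extra notes */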
+ sz += elf_coredump_extra_notes_size();
fill_elf_note_phdr(&phdr, sz, offset);
offset += sz;
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
+ /*
+ * We must use the same mm->flags while dumping core to avoid
+ * inconsistency between the program headers and bodies, otherwise an
+ * unusable core file can be generated.
+ */
+ mm_flags = current->mm->flags;
+
/* Write program headers for segments dump */
- for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+ for (vma = first_vma(current, gate_vma); vma != NULL;
+ vma = next_vma(vma, gate_vma)) {
struct elf_phdr phdr;
size_t sz;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
- phdr.p_filesz = maydump(vma) ? sz : 0;
+ phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
phdr.p_memsz = sz;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
if (!writenote(notes + i, file, &foffset))
goto end_coredump;
-#ifdef ELF_CORE_WRITE_EXTRA_NOTES
- ELF_CORE_WRITE_EXTRA_NOTES;
-#endif
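+ /* arch hook to append its extra notes; nonzero means failure */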
+ if (elf_coredump_extra_notes_write(file, &foffset))
+ goto end_coredump;
/* write out the thread status notes section */
list_for_each(t, &thread_list) {
/* Align to page */
DUMP_SEEK(dataoff - foffset);
- for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+ for (vma = first_vma(current, gate_vma); vma != NULL;
+ vma = next_vma(vma, gate_vma)) {
unsigned long addr;
- if (!maydump(vma))
+ if (!maydump(vma, mm_flags))
continue;
for (addr = vma->vm_start;
DUMP_SEEK(PAGE_SIZE);
} else {
if (page == ZERO_PAGE(addr)) {
- DUMP_SEEK(PAGE_SIZE);
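+ /* drop the page reference before bailing out on a failed seek */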
+ if (!dump_seek(file, PAGE_SIZE)) {
+ page_cache_release(page);
+ goto end_coredump;
+ }
} else {
void *kaddr;
flush_cache_page(vma, addr,