diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2c95848fbce8261463e54833d60d68573e9fb9b8..e9f1b4ea504d920bd514deb8868536fd10de77bb 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -6,21 +6,36 @@
  * Version 2.  See the file COPYING for more details.
  */
 
+#include <linux/capability.h>
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
 #include <linux/reboot.h>
-#include <linux/syscalls.h>
 #include <linux/ioport.h>
 #include <linux/hardirq.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/numa.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/system.h>
 #include <asm/semaphore.h>
+#include <asm/sections.h>
+
+/* Per cpu memory for storing cpu states in case of system crash. */
+note_buf_t* crash_notes;
+
+/* vmcoreinfo stuff */
+unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+size_t vmcoreinfo_size;
+size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
 
 /* Location of the reserved area for the crash kernel */
 struct resource crashk_res = {
@@ -36,7 +51,7 @@ struct resource crashk_res = {
 
 int kexec_should_crash(struct task_struct *p)
 {
-       if (in_interrupt() || !p->pid || p->pid == 1 || panic_on_oops)
+       if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops)
                return 1;
        return 0;
 }
@@ -104,11 +119,10 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 
        /* Allocate a controlling structure */
        result = -ENOMEM;
-       image = kmalloc(sizeof(*image), GFP_KERNEL);
+       image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;
 
-       memset(image, 0, sizeof(*image));
        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
@@ -771,7 +785,7 @@ static int kimage_load_normal_segment(struct kimage *image,
                size_t uchunk, mchunk;
 
                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
-               if (page == 0) {
+               if (!page) {
                        result  = -ENOMEM;
                        goto out;
                }
@@ -830,7 +844,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                size_t uchunk, mchunk;
 
                page = pfn_to_page(maddr >> PAGE_SHIFT);
-               if (page == 0) {
+               if (!page) {
                        result  = -ENOMEM;
                        goto out;
                }
@@ -847,6 +861,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
+               kexec_flush_icache_page(page);
                kunmap(page);
                if (result) {
                        result = (result < 0) ? result : -EIO;
@@ -898,14 +913,14 @@ static int kimage_load_segment(struct kimage *image,
  * kexec does not sync, or unmount filesystems so if you need
  * that to happen you need to do that yourself.
  */
-struct kimage *kexec_image = NULL;
-static struct kimage *kexec_crash_image = NULL;
+struct kimage *kexec_image;
+struct kimage *kexec_crash_image;
 /*
  * A home grown binary mutex.
  * Nothing can wait so this mutex is safe to use
  * in interrupt context :)
  */
-static int kexec_lock = 0;
+static int kexec_lock;
 
 asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                                struct kexec_segment __user *segments,
@@ -991,7 +1006,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
        image = xchg(dest_image, image);
 
 out:
-       xchg(&kexec_lock, 0); /* Release the mutex */
+       locked = xchg(&kexec_lock, 0); /* Release the mutex */
+       BUG_ON(!locked);
        kimage_free(image);
 
        return result;
@@ -1038,7 +1054,6 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-       struct kimage *image;
        int locked;
 
 
@@ -1052,11 +1067,182 @@ void crash_kexec(struct pt_regs *regs)
         */
        locked = xchg(&kexec_lock, 1);
        if (!locked) {
-               image = xchg(&kexec_crash_image, NULL);
-               if (image) {
-                       machine_crash_shutdown(regs);
-                       machine_kexec(image);
+               if (kexec_crash_image) {
+                       struct pt_regs fixed_regs;
+                       crash_setup_regs(&fixed_regs, regs);
+                       crash_save_vmcoreinfo();
+                       machine_crash_shutdown(&fixed_regs);
+                       machine_kexec(kexec_crash_image);
                }
-               xchg(&kexec_lock, 0);
+               locked = xchg(&kexec_lock, 0);
+               BUG_ON(!locked);
        }
 }
+
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+                           size_t data_len)
+{
+       struct elf_note note;
+
+       note.n_namesz = strlen(name) + 1;
+       note.n_descsz = data_len;
+       note.n_type   = type;
+       memcpy(buf, &note, sizeof(note));
+       buf += (sizeof(note) + 3)/4;
+       memcpy(buf, name, note.n_namesz);
+       buf += (note.n_namesz + 3)/4;
+       memcpy(buf, data, note.n_descsz);
+       buf += (note.n_descsz + 3)/4;
+
+       return buf;
+}
+
+static void final_note(u32 *buf)
+{
+       struct elf_note note;
+
+       note.n_namesz = 0;
+       note.n_descsz = 0;
+       note.n_type   = 0;
+       memcpy(buf, &note, sizeof(note));
+}
+
+void crash_save_cpu(struct pt_regs *regs, int cpu)
+{
+       struct elf_prstatus prstatus;
+       u32 *buf;
+
+       if ((cpu < 0) || (cpu >= NR_CPUS))
+               return;
+
+       /* Using ELF notes here is opportunistic.
+        * I need a well defined structure format
+        * for the data I pass, and I need tags
+        * on the data to indicate what information I have
+        * squirrelled away.  ELF notes happen to provide
+        * all of that, so there is no need to invent something new.
+        */
+       buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+       if (!buf)
+               return;
+       memset(&prstatus, 0, sizeof(prstatus));
+       prstatus.pr_pid = current->pid;
+       elf_core_copy_regs(&prstatus.pr_reg, regs);
+       buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
+                             &prstatus, sizeof(prstatus));
+       final_note(buf);
+}
+
+static int __init crash_notes_memory_init(void)
+{
+       /* Allocate memory for saving cpu registers. */
+       crash_notes = alloc_percpu(note_buf_t);
+       if (!crash_notes) {
+               printk("Kexec: Memory allocation for saving cpu register"
+               " states failed\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+module_init(crash_notes_memory_init)
+
+void crash_save_vmcoreinfo(void)
+{
+       u32 *buf;
+
+       if (!vmcoreinfo_size)
+               return;
+
+       vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+
+       buf = (u32 *)vmcoreinfo_note;
+
+       buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
+                             vmcoreinfo_size);
+
+       final_note(buf);
+}
+
+void vmcoreinfo_append_str(const char *fmt, ...)
+{
+       va_list args;
+       char buf[0x50];
+       int r;
+
+       va_start(args, fmt);
+       r = vsnprintf(buf, sizeof(buf), fmt, args);
+       va_end(args);
+
+       if (r + vmcoreinfo_size > vmcoreinfo_max_size)
+               r = vmcoreinfo_max_size - vmcoreinfo_size;
+
+       memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
+
+       vmcoreinfo_size += r;
+}
+
+/*
+ * provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
+{}
+
+unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
+{
+       return __pa((unsigned long)(char *)&vmcoreinfo_note);
+}
+
+static int __init crash_save_vmcoreinfo_init(void)
+{
+       vmcoreinfo_append_str("OSRELEASE=%s\n", init_uts_ns.name.release);
+       vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE);
+
+       VMCOREINFO_SYMBOL(init_uts_ns);
+       VMCOREINFO_SYMBOL(node_online_map);
+       VMCOREINFO_SYMBOL(swapper_pg_dir);
+       VMCOREINFO_SYMBOL(_stext);
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+       VMCOREINFO_SYMBOL(mem_map);
+       VMCOREINFO_SYMBOL(contig_page_data);
+#endif
+#ifdef CONFIG_SPARSEMEM
+       VMCOREINFO_SYMBOL(mem_section);
+       VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+       VMCOREINFO_SIZE(mem_section);
+       VMCOREINFO_OFFSET(mem_section, section_mem_map);
+#endif
+       VMCOREINFO_SIZE(page);
+       VMCOREINFO_SIZE(pglist_data);
+       VMCOREINFO_SIZE(zone);
+       VMCOREINFO_SIZE(free_area);
+       VMCOREINFO_SIZE(list_head);
+       VMCOREINFO_TYPEDEF_SIZE(nodemask_t);
+       VMCOREINFO_OFFSET(page, flags);
+       VMCOREINFO_OFFSET(page, _count);
+       VMCOREINFO_OFFSET(page, mapping);
+       VMCOREINFO_OFFSET(page, lru);
+       VMCOREINFO_OFFSET(pglist_data, node_zones);
+       VMCOREINFO_OFFSET(pglist_data, nr_zones);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+       VMCOREINFO_OFFSET(pglist_data, node_mem_map);
+#endif
+       VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
+       VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
+       VMCOREINFO_OFFSET(pglist_data, node_id);
+       VMCOREINFO_OFFSET(zone, free_area);
+       VMCOREINFO_OFFSET(zone, vm_stat);
+       VMCOREINFO_OFFSET(zone, spanned_pages);
+       VMCOREINFO_OFFSET(free_area, free_list);
+       VMCOREINFO_OFFSET(list_head, next);
+       VMCOREINFO_OFFSET(list_head, prev);
+       VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+       VMCOREINFO_NUMBER(NR_FREE_PAGES);
+
+       arch_crash_save_vmcoreinfo();
+
+       return 0;
+}
+
+module_init(crash_save_vmcoreinfo_init)