2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7 * Copyright (C) 1995, 1996 Paul M. Antoine
8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
12 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
14 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/spinlock.h>
21 #include <linux/kallsyms.h>
22 #include <linux/bootmem.h>
23 #include <linux/interrupt.h>
25 #include <asm/bootinfo.h>
26 #include <asm/branch.h>
27 #include <asm/break.h>
31 #include <asm/mipsregs.h>
32 #include <asm/mipsmtregs.h>
33 #include <asm/module.h>
34 #include <asm/pgtable.h>
35 #include <asm/ptrace.h>
36 #include <asm/sections.h>
37 #include <asm/system.h>
38 #include <asm/tlbdebug.h>
39 #include <asm/traps.h>
40 #include <asm/uaccess.h>
41 #include <asm/mmu_context.h>
42 #include <asm/watch.h>
43 #include <asm/types.h>
44 #include <asm/stacktrace.h>
46 extern asmlinkage void handle_int(void);
47 extern asmlinkage void handle_tlbm(void);
48 extern asmlinkage void handle_tlbl(void);
49 extern asmlinkage void handle_tlbs(void);
50 extern asmlinkage void handle_adel(void);
51 extern asmlinkage void handle_ades(void);
52 extern asmlinkage void handle_ibe(void);
53 extern asmlinkage void handle_dbe(void);
54 extern asmlinkage void handle_sys(void);
55 extern asmlinkage void handle_bp(void);
56 extern asmlinkage void handle_ri(void);
57 extern asmlinkage void handle_ri_rdhwr_vivt(void);
58 extern asmlinkage void handle_ri_rdhwr(void);
59 extern asmlinkage void handle_cpu(void);
60 extern asmlinkage void handle_ov(void);
61 extern asmlinkage void handle_tr(void);
62 extern asmlinkage void handle_fpe(void);
63 extern asmlinkage void handle_mdmx(void);
64 extern asmlinkage void handle_watch(void);
65 extern asmlinkage void handle_mt(void);
66 extern asmlinkage void handle_dsp(void);
67 extern asmlinkage void handle_mcheck(void);
68 extern asmlinkage void handle_reserved(void);
70 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
71 struct mips_fpu_struct *ctx, int has_fpu);
73 void (*board_be_init)(void);
74 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
75 void (*board_nmi_handler_setup)(void);
76 void (*board_ejtag_handler_setup)(void);
77 void (*board_bind_eic_interrupt)(int irq, int regset);
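/*
 * Illustrative sketch only (not part of the original file): board support
 * code is expected to install these hooks before they are first used, e.g.
 * from its setup path.  A hypothetical bus error handler might look like
 *
 *	static int my_board_be_handler(struct pt_regs *regs, int is_fixup)
 *	{
 *		return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 *	}
 *
 *	void __init my_board_setup(void)
 *	{
 *		board_be_handler = my_board_be_handler;
 *	}
 *
 * do_be() below consults board_be_handler and honours the MIPS_BE_* action
 * code it returns; my_board_be_handler/my_board_setup are made-up names.
 */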
80 static void show_raw_backtrace(unsigned long reg29)
82 unsigned long *sp = (unsigned long *)reg29;
85 printk("Call Trace:");
86 #ifdef CONFIG_KALLSYMS
89 while (!kstack_end(sp)) {
91 if (__kernel_text_address(addr))
97 #ifdef CONFIG_KALLSYMS
99 static int __init set_raw_show_trace(char *str)
104 __setup("raw_show_trace", set_raw_show_trace);
107 static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
109 unsigned long sp = regs->regs[29];
110 unsigned long ra = regs->regs[31];
111 unsigned long pc = regs->cp0_epc;
113 if (raw_show_trace || !__kernel_text_address(pc)) {
114 show_raw_backtrace(sp);
117 printk("Call Trace:\n");
120 pc = unwind_stack(task, &sp, pc, &ra);
126 * This routine abuses get_user()/put_user() to reference pointers
127 * with at least a bit of error checking ...
129 static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
131 const int field = 2 * sizeof(unsigned long);
134 unsigned long *sp = (unsigned long *)regs->regs[29];
138 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
139 if (i && ((i % (64 / field)) == 0))
146 if (__get_user(stackdata, sp++)) {
147 printk(" (Bad stack address)");
151 printk(" %0*lx", field, stackdata);
155 show_backtrace(task, regs);
158 void show_stack(struct task_struct *task, unsigned long *sp)
162 regs.regs[29] = (unsigned long)sp;
166 if (task && task != current) {
167 regs.regs[29] = task->thread.reg29;
169 regs.cp0_epc = task->thread.reg31;
171 prepare_frametrace(&regs);
174 show_stacktrace(task, &regs);
178 * The architecture-independent dump_stack generator
180 void dump_stack(void)
184 prepare_frametrace(&regs);
185 show_backtrace(current, &regs);
188 EXPORT_SYMBOL(dump_stack);
190 void show_code(unsigned int *pc)
196 for (i = -3; i < 6; i++) {
198 if (__get_user(insn, pc + i)) {
199 printk(" (Bad address in epc)\n");
202 printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
206 void show_regs(struct pt_regs *regs)
208 const int field = 2 * sizeof(unsigned long);
209 unsigned int cause = regs->cp0_cause;
212 printk("Cpu %d\n", smp_processor_id());
215 * Saved main processor registers
217 for (i = 0; i < 32; ) {
221 printk(" %0*lx", field, 0UL);
222 else if (i == 26 || i == 27)
223 printk(" %*s", field, "");
225 printk(" %0*lx", field, regs->regs[i]);
232 printk("Hi : %0*lx\n", field, regs->hi);
233 printk("Lo : %0*lx\n", field, regs->lo);
236 * Saved cp0 registers
238 printk("epc : %0*lx ", field, regs->cp0_epc);
239 print_symbol("%s ", regs->cp0_epc);
240 printk(" %s\n", print_tainted());
241 printk("ra : %0*lx ", field, regs->regs[31]);
242 print_symbol("%s\n", regs->regs[31]);
244 printk("Status: %08x ", (uint32_t) regs->cp0_status);
246 if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
247 if (regs->cp0_status & ST0_KUO)
249 if (regs->cp0_status & ST0_IEO)
251 if (regs->cp0_status & ST0_KUP)
253 if (regs->cp0_status & ST0_IEP)
255 if (regs->cp0_status & ST0_KUC)
257 if (regs->cp0_status & ST0_IEC)
260 if (regs->cp0_status & ST0_KX)
262 if (regs->cp0_status & ST0_SX)
264 if (regs->cp0_status & ST0_UX)
266 switch (regs->cp0_status & ST0_KSU) {
271 printk("SUPERVISOR ");
280 if (regs->cp0_status & ST0_ERL)
282 if (regs->cp0_status & ST0_EXL)
284 if (regs->cp0_status & ST0_IE)
289 printk("Cause : %08x\n", cause);
291 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
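/*
 * Exception codes 1..5 (TLB modified, TLB load, TLB store, AdEL and
 * AdES) are the ones that leave a meaningful address in BadVAddr.
 */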
292 if (1 <= cause && cause <= 5)
293 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
295 printk("PrId : %08x\n", read_c0_prid());
298 void show_registers(struct pt_regs *regs)
302 printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
303 current->comm, current->pid, current_thread_info(), current);
304 show_stacktrace(current, regs);
305 show_code((unsigned int *) regs->cp0_epc);
309 static DEFINE_SPINLOCK(die_lock);
311 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
313 static int die_counter;
314 #ifdef CONFIG_MIPS_MT_SMTC
315 unsigned long dvpret = dvpe();
316 #endif /* CONFIG_MIPS_MT_SMTC */
319 spin_lock_irq(&die_lock);
321 #ifdef CONFIG_MIPS_MT_SMTC
322 mips_mt_regdump(dvpret);
323 #endif /* CONFIG_MIPS_MT_SMTC */
324 printk("%s[#%d]:\n", str, ++die_counter);
325 show_registers(regs);
326 spin_unlock_irq(&die_lock);
329 panic("Fatal exception in interrupt");
332 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
334 panic("Fatal exception");
340 extern const struct exception_table_entry __start___dbe_table[];
341 extern const struct exception_table_entry __stop___dbe_table[];
344 " .section __dbe_table, \"a\"\n"
347 /* Given an address, look for it in the exception tables. */
348 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
350 const struct exception_table_entry *e;
352 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
354 e = search_module_dbetables(addr);
358 asmlinkage void do_be(struct pt_regs *regs)
360 const int field = 2 * sizeof(unsigned long);
361 const struct exception_table_entry *fixup = NULL;
362 int data = regs->cp0_cause & 4;
363 int action = MIPS_BE_FATAL;
365 /* XXX For now. Fixme, this searches the wrong table ... */
366 if (data && !user_mode(regs))
367 fixup = search_dbe_tables(exception_epc(regs));
370 action = MIPS_BE_FIXUP;
372 if (board_be_handler)
373 action = board_be_handler(regs, fixup != 0);
376 case MIPS_BE_DISCARD:
380 regs->cp0_epc = fixup->nextinsn;
389 * Assume it would be too dangerous to continue ...
391 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
392 data ? "Data" : "Instruction",
393 field, regs->cp0_epc, field, regs->regs[31]);
394 die_if_kernel("Oops", regs);
395 force_sig(SIGBUS, current);
402 #define OPCODE 0xfc000000
403 #define BASE 0x03e00000
404 #define RT 0x001f0000
405 #define OFFSET 0x0000ffff
406 #define LL 0xc0000000
407 #define SC 0xe0000000
408 #define SPEC3 0x7c000000
409 #define RD 0x0000f800
410 #define FUNC 0x0000003f
411 #define RDHWR 0x0000003b
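/*
 * Worked example (illustrative only): for "ll t0, 4(s0)" the encoded
 * instruction word is 0xc2080004 and the masks above pick it apart as
 *
 *	(0xc2080004 & OPCODE)       == 0xc0000000 == LL
 *	(0xc2080004 & BASE) >> 21   == 16	(s0, the base register)
 *	(0xc2080004 & RT)   >> 16   ==  8	(t0, the destination)
 *	(0xc2080004 & OFFSET)       ==  4	(16-bit signed offset)
 *
 * which is exactly how simulate_ll()/simulate_sc() below recover the
 * operands of the faulting instruction.
 */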
414 * The ll_bit is cleared by r*_switch.S
417 unsigned long ll_bit;
419 static struct task_struct *ll_task = NULL;
421 static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
423 unsigned long value, __user *vaddr;
428 * analyse the ll instruction that just caused a ri exception
429 * and put the referenced address into vaddr.
432 /* sign extend offset */
433 offset = opcode & OFFSET;
437 vaddr = (unsigned long __user *)
438 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
440 if ((unsigned long)vaddr & 3) {
444 if (get_user(value, vaddr)) {
451 if (ll_task == NULL || ll_task == current) {
460 compute_return_epc(regs);
462 regs->regs[(opcode & RT) >> 16] = value;
467 force_sig(signal, current);
470 static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
472 unsigned long __user *vaddr;
478 * analyse the sc instruction that just caused a ri exception
479 * and put the referenced address into vaddr.
482 /* sign extend offset */
483 offset = opcode & OFFSET;
487 vaddr = (unsigned long __user *)
488 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
489 reg = (opcode & RT) >> 16;
491 if ((unsigned long)vaddr & 3) {
498 if (ll_bit == 0 || ll_task != current) {
499 compute_return_epc(regs);
507 if (put_user(regs->regs[reg], vaddr)) {
512 compute_return_epc(regs);
518 force_sig(signal, current);
522 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
523 * opcodes are supposed to result in coprocessor unusable exceptions if
524 * executed on ll/sc-less processors. That's the theory. In practice a
525 * few processors such as NEC's VR4100 throw reserved instruction exceptions
526 * instead, so we're doing the emulation thing in both exception handlers.
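/*
 * For illustration (not taken from this file): the typical user-space
 * sequence that ends up in these emulation paths on an ll/sc-less CPU is
 * the classic atomic read-modify-write loop, e.g.
 *
 *	1:	ll	t0, 0(a0)	# load-linked, sets the reservation
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# store-conditional, t0 = 1 only if
 *					# the reservation is still held
 *		beqz	t0, 1b		# otherwise retry
 *
 * simulate_ll() and simulate_sc() fake the reservation with the global
 * ll_bit/ll_task pair declared above.
 */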
528 static inline int simulate_llsc(struct pt_regs *regs)
532 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
535 if ((opcode & OPCODE) == LL) {
536 simulate_ll(regs, opcode);
539 if ((opcode & OPCODE) == SC) {
540 simulate_sc(regs, opcode);
544 return -EFAULT; /* Strange things going on ... */
547 force_sig(SIGSEGV, current);
552 * Simulate trapping 'rdhwr' instructions to provide user accessible
553 * registers not implemented in hardware. The only current use of this
554 * is the thread area pointer.
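/*
 * Illustrative only: user code (typically the TLS sequence emitted by the
 * compiler or libc) reads the thread pointer with something like
 *
 *	rdhwr	$3, $29		# hardware register 29 is the user-local
 *				# (thread pointer) register
 *
 * On CPUs without the hardware register the instruction traps and the
 * handler below supplies ti->tp_value instead.
 */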
556 static inline int simulate_rdhwr(struct pt_regs *regs)
558 struct thread_info *ti = task_thread_info(current);
561 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
564 if (unlikely(compute_return_epc(regs)))
567 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
568 int rd = (opcode & RD) >> 11;
569 int rt = (opcode & RT) >> 16;
572 regs->regs[rt] = ti->tp_value;
583 force_sig(SIGSEGV, current);
587 asmlinkage void do_ov(struct pt_regs *regs)
591 die_if_kernel("Integer overflow", regs);
593 info.si_code = FPE_INTOVF;
594 info.si_signo = SIGFPE;
596 info.si_addr = (void __user *) regs->cp0_epc;
597 force_sig_info(SIGFPE, &info, current);
601 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
603 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
605 die_if_kernel("FP exception in kernel code", regs);
607 if (fcr31 & FPU_CSR_UNI_X) {
612 #ifdef CONFIG_PREEMPT
613 if (!is_fpu_owner()) {
614 /* We might lose fpu before disabling preempt... */
616 BUG_ON(!used_math());
621 * Unimplemented operation exception. If we've got the full
622 * software emulator on-board, let's use it...
624 * Force FPU to dump state into task/thread context. We're
625 * moving a lot of data here for what is probably a single
626 * instruction, but the alternative is to pre-decode the FP
627 * register operands before invoking the emulator, which seems
628 * a bit extreme for what should be an infrequent event.
631 /* Ensure 'resume' does not overwrite saved fp context again. */
636 /* Run the emulator */
637 sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
641 own_fpu(); /* Using the FPU again. */
643 * We can't allow the emulated instruction to leave any of
644 * the cause bits set in $fcr31.
646 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
648 /* Restore the hardware register state */
653 /* If something went wrong, signal */
655 force_sig(sig, current);
660 force_sig(SIGFPE, current);
663 asmlinkage void do_bp(struct pt_regs *regs)
665 unsigned int opcode, bcode;
668 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
672 * There is an ancient bug in MIPS assemblers that causes the break
673 * code to start at bit 16 instead of bit 6 in the opcode.
674 * Gas is bug-compatible, but not always, grrr...
675 * We handle both cases with a simple heuristic. --macro
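/*
 * Worked example (illustrative): a correctly assembled "break 7" has the
 * code in bits 15..6, so (opcode >> 6) & 0xfffff == 7; being below 1 << 10
 * it is normalised by shifting it up by 10.  The buggy encoding already
 * has the 7 in bits 25..16 and extracts as 7 << 10 == 7168.  Either way
 * the switch below can compare against BRK_* << 10.
 */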
677 bcode = ((opcode >> 6) & ((1 << 20) - 1));
678 if (bcode < (1 << 10))
682 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
683 * insns, even for break codes that indicate arithmetic failures.
685 * But should we continue the brokenness??? --macro
688 case BRK_OVERFLOW << 10:
689 case BRK_DIVZERO << 10:
690 die_if_kernel("Break instruction in kernel code", regs);
691 if (bcode == (BRK_DIVZERO << 10))
692 info.si_code = FPE_INTDIV;
694 info.si_code = FPE_INTOVF;
695 info.si_signo = SIGFPE;
697 info.si_addr = (void __user *) regs->cp0_epc;
698 force_sig_info(SIGFPE, &info, current);
701 die("Kernel bug detected", regs);
704 die_if_kernel("Break instruction in kernel code", regs);
705 force_sig(SIGTRAP, current);
709 force_sig(SIGSEGV, current);
712 asmlinkage void do_tr(struct pt_regs *regs)
714 unsigned int opcode, tcode = 0;
717 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
720 /* Immediate versions don't provide a code. */
721 if (!(opcode & OPCODE))
722 tcode = ((opcode >> 6) & ((1 << 10) - 1));
725 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
726 * insns, even for trap codes that indicate arithmetic failures.
728 * But should we continue the brokenness??? --macro
733 die_if_kernel("Trap instruction in kernel code", regs);
734 if (tcode == BRK_DIVZERO)
735 info.si_code = FPE_INTDIV;
737 info.si_code = FPE_INTOVF;
738 info.si_signo = SIGFPE;
740 info.si_addr = (void __user *) regs->cp0_epc;
741 force_sig_info(SIGFPE, &info, current);
744 die("Kernel bug detected", regs);
747 die_if_kernel("Trap instruction in kernel code", regs);
748 force_sig(SIGTRAP, current);
752 force_sig(SIGSEGV, current);
755 asmlinkage void do_ri(struct pt_regs *regs)
757 die_if_kernel("Reserved instruction in kernel code", regs);
760 if (!simulate_llsc(regs))
763 if (!simulate_rdhwr(regs))
766 force_sig(SIGILL, current);
769 asmlinkage void do_cpu(struct pt_regs *regs)
773 die_if_kernel("do_cpu invoked from kernel context!", regs);
775 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
780 if (!simulate_llsc(regs))
783 if (!simulate_rdhwr(regs))
792 if (used_math()) { /* Using the FPU again. */
794 } else { /* First time FPU user. */
804 sig = fpu_emulator_cop1Handler(regs,
805 &current->thread.fpu, 0);
807 force_sig(sig, current);
808 #ifdef CONFIG_MIPS_MT_FPAFF
811 * MIPS MT processors may have fewer FPU contexts
812 * than CPU threads. If we've emulated more than
813 * some threshold number of instructions, force
814 * migration to a "CPU" that has FP support.
816 if (mt_fpemul_threshold > 0
817 && ((current->thread.emulated_fp++
818 > mt_fpemul_threshold))) {
820 * If there's no FPU present, or if the
821 * application has already restricted
822 * the allowed set to exclude any CPUs
823 * with FPUs, we'll skip the procedure.
825 if (cpus_intersects(current->cpus_allowed,
830 current->thread.user_cpus_allowed,
832 set_cpus_allowed(current, tmask);
833 current->thread.mflags |= MF_FPUBOUND;
837 #endif /* CONFIG_MIPS_MT_FPAFF */
844 die_if_kernel("do_cpu invoked from kernel context!", regs);
848 force_sig(SIGILL, current);
851 asmlinkage void do_mdmx(struct pt_regs *regs)
853 force_sig(SIGILL, current);
856 asmlinkage void do_watch(struct pt_regs *regs)
859 * We use the watch exception where available to detect stack overflows.
864 panic("Caught WATCH exception - probably caused by stack overflow.");
867 asmlinkage void do_mcheck(struct pt_regs *regs)
869 const int field = 2 * sizeof(unsigned long);
870 int multi_match = regs->cp0_status & ST0_TS;
875 printk("Index : %0x\n", read_c0_index());
876 printk("Pagemask: %0x\n", read_c0_pagemask());
877 printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
878 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
879 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
884 show_code((unsigned int *) regs->cp0_epc);
887 * Some chips may have other causes of machine check (e.g. SB1
890 panic("Caught Machine Check exception - %scaused by multiple "
891 "matching entries in the TLB.",
892 (multi_match) ? "" : "not ");
895 asmlinkage void do_mt(struct pt_regs *regs)
899 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
900 >> VPECONTROL_EXCPT_SHIFT;
903 printk(KERN_DEBUG "Thread Underflow\n");
906 printk(KERN_DEBUG "Thread Overflow\n");
909 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
912 printk(KERN_DEBUG "Gating Storage Exception\n");
915 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
918 printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
921 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
925 die_if_kernel("MIPS MT Thread exception in kernel", regs);
927 force_sig(SIGILL, current);
931 asmlinkage void do_dsp(struct pt_regs *regs)
934 panic("Unexpected DSP exception\n");
936 force_sig(SIGILL, current);
939 asmlinkage void do_reserved(struct pt_regs *regs)
942 * Game over - no way to handle this if it ever occurs. Most probably
943 * caused by a new unknown cpu type or after another deadly
944 * hardware or software error.
947 panic("Caught reserved exception %ld - should not happen.",
948 (regs->cp0_cause & 0x7f) >> 2);
951 asmlinkage void do_default_vi(struct pt_regs *regs)
954 panic("Caught unexpected vectored interrupt.");
958 * Some MIPS CPUs can enable/disable cache parity detection, but do it in different ways.
961 static inline void parity_protection_init(void)
963 switch (current_cpu_data.cputype) {
967 write_c0_ecc(0x80000000);
968 back_to_back_c0_hazard();
969 /* Set the PE bit (bit 31) in the c0_errctl register. */
970 printk(KERN_INFO "Cache parity protection %sabled\n",
971 (read_c0_ecc() & 0x80000000) ? "en" : "dis");
975 /* Clear the DE bit (bit 16) in the c0_status register. */
976 printk(KERN_INFO "Enable cache parity protection for "
977 "MIPS 20KC/25KF CPUs.\n");
978 clear_c0_status(ST0_DE);
985 asmlinkage void cache_parity_error(void)
987 const int field = 2 * sizeof(unsigned long);
988 unsigned int reg_val;
990 /* For the moment, report the problem and hang. */
991 printk("Cache error exception:\n");
992 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
993 reg_val = read_c0_cacheerr();
994 printk("c0_cacheerr == %08x\n", reg_val);
996 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
997 reg_val & (1<<30) ? "secondary" : "primary",
998 reg_val & (1<<31) ? "data" : "insn");
999 printk("Error bits: %s%s%s%s%s%s%s\n",
1000 reg_val & (1<<29) ? "ED " : "",
1001 reg_val & (1<<28) ? "ET " : "",
1002 reg_val & (1<<26) ? "EE " : "",
1003 reg_val & (1<<25) ? "EB " : "",
1004 reg_val & (1<<24) ? "EI " : "",
1005 reg_val & (1<<23) ? "E1 " : "",
1006 reg_val & (1<<22) ? "E0 " : "");
1007 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1009 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1010 if (reg_val & (1<<22))
1011 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1013 if (reg_val & (1<<23))
1014 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1017 panic("Can't handle the cache error!");
1021 * SDBBP EJTAG debug exception handler.
1022 * We skip the instruction and return to the next instruction.
1024 void ejtag_exception_handler(struct pt_regs *regs)
1026 const int field = 2 * sizeof(unsigned long);
1027 unsigned long depc, old_epc;
1030 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1031 depc = read_c0_depc();
1032 debug = read_c0_debug();
1033 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1034 if (debug & 0x80000000) {
1036 * In branch delay slot.
1037 * We cheat a little bit here and use EPC to calculate the
1038 * debug return address (DEPC). EPC is restored after the calculation.
1041 old_epc = regs->cp0_epc;
1042 regs->cp0_epc = depc;
1043 __compute_return_epc(regs);
1044 depc = regs->cp0_epc;
1045 regs->cp0_epc = old_epc;
1048 write_c0_depc(depc);
1051 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1052 write_c0_debug(debug | 0x100);
1057 * NMI exception handler.
1059 void nmi_exception_handler(struct pt_regs *regs)
1061 #ifdef CONFIG_MIPS_MT_SMTC
1062 unsigned long dvpret = dvpe();
1064 printk("NMI taken!!!!\n");
1065 mips_mt_regdump(dvpret);
1068 printk("NMI taken!!!!\n");
1069 #endif /* CONFIG_MIPS_MT_SMTC */
1074 #define VECTORSPACING 0x100 /* for EI/VI mode */
1076 unsigned long ebase;
1077 unsigned long exception_handlers[32];
1078 unsigned long vi_handlers[64];
1081 * As a side effect of the way this is implemented we're limited
1082 * to interrupt handlers in the address range from
1083 * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
1085 void *set_except_vector(int n, void *addr)
1087 unsigned long handler = (unsigned long) addr;
1088 unsigned long old_handler = exception_handlers[n];
1090 exception_handlers[n] = handler;
1091 if (n == 0 && cpu_has_divec) {
1092 *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
1093 (0x03ffffff & (handler >> 2));
1094 flush_icache_range(ebase + 0x200, ebase + 0x204);
1096 return (void *)old_handler;
1099 #ifdef CONFIG_CPU_MIPSR2_SRS
1101 * MIPSR2 shadow register set allocation
1105 static struct shadow_registers {
1107 * Number of shadow register sets supported
1109 unsigned long sr_supported;
1111 * Bitmap of allocated shadow registers
1113 unsigned long sr_allocated;
1116 static void mips_srs_init(void)
1118 shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
1119 printk(KERN_INFO "%ld MIPSR2 register sets available\n",
1120 shadow_registers.sr_supported);
1121 shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
1124 int mips_srs_max(void)
1126 return shadow_registers.sr_supported;
1129 int mips_srs_alloc(void)
1131 struct shadow_registers *sr = &shadow_registers;
1135 set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
1136 if (set >= sr->sr_supported)
1139 if (test_and_set_bit(set, &sr->sr_allocated))
1145 void mips_srs_free(int set)
1147 struct shadow_registers *sr = &shadow_registers;
1149 clear_bit(set, &sr->sr_allocated);
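/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * code wanting a dedicated shadow register set for a vectored interrupt
 * would pair the two helpers above, e.g.
 *
 *	int set = mips_srs_alloc();
 *	if (set < 0)
 *		set = 0;		(fall back to the kernel's set 0)
 *	...
 *	mips_srs_free(set);
 *
 * set_vi_srs_handler() below then binds the chosen set to a vector via
 * SRSMap, or via board_bind_eic_interrupt() in EIC mode.
 */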
1152 static void *set_vi_srs_handler(int n, void *addr, int srs)
1154 unsigned long handler;
1155 unsigned long old_handler = vi_handlers[n];
1159 if (!cpu_has_veic && !cpu_has_vint)
1163 handler = (unsigned long) do_default_vi;
1166 handler = (unsigned long) addr;
1167 vi_handlers[n] = (unsigned long) addr;
1169 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1171 if (srs >= mips_srs_max())
1172 panic("Shadow register set %d not supported", srs);
1175 if (board_bind_eic_interrupt)
1176 board_bind_eic_interrupt (n, srs);
1177 } else if (cpu_has_vint) {
1178 /* SRSMap is only defined if shadow sets are implemented */
1179 if (mips_srs_max() > 1)
1180 change_c0_srsmap (0xf << n*4, srs << n*4);
1185 * If no shadow set is selected then use the default handler
1186 * that does normal register saving and a standard interrupt exit
1189 extern char except_vec_vi, except_vec_vi_lui;
1190 extern char except_vec_vi_ori, except_vec_vi_end;
1191 #ifdef CONFIG_MIPS_MT_SMTC
1193 * We need to provide the SMTC vectored interrupt handler
1194 * not only with the address of the handler, but with the
1195 * Status.IM bit to be masked before going there.
1197 extern char except_vec_vi_mori;
1198 const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
1199 #endif /* CONFIG_MIPS_MT_SMTC */
1200 const int handler_len = &except_vec_vi_end - &except_vec_vi;
1201 const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
1202 const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
1204 if (handler_len > VECTORSPACING) {
1206 * Sigh... panicking won't help as the console
1207 * is probably not configured :(
1209 panic ("VECTORSPACING too small");
1212 memcpy (b, &except_vec_vi, handler_len);
1213 #ifdef CONFIG_MIPS_MT_SMTC
1215 printk("Vector index %d exceeds SMTC maximum\n", n);
1216 w = (u32 *)(b + mori_offset);
1217 *w = (*w & 0xffff0000) | (0x100 << n);
1218 #endif /* CONFIG_MIPS_MT_SMTC */
1219 w = (u32 *)(b + lui_offset);
1220 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1221 w = (u32 *)(b + ori_offset);
1222 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
1223 flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
1227 * In other cases jump directly to the interrupt handler
1229 * It is the handler's responsibility to save registers if required
1230 * (e.g. hi/lo) and return from the exception using "eret".
1233 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
1235 flush_icache_range((unsigned long)b, (unsigned long)(b+8));
1238 return (void *)old_handler;
1241 void *set_vi_handler(int n, void *addr)
1243 return set_vi_srs_handler(n, addr, 0);
1248 static inline void mips_srs_init(void)
1252 #endif /* CONFIG_CPU_MIPSR2_SRS */
1255 * This is used by native signal handling
1257 asmlinkage int (*save_fp_context)(struct sigcontext *sc);
1258 asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
1260 extern asmlinkage int _save_fp_context(struct sigcontext *sc);
1261 extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
1263 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
1264 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
1267 static int smp_save_fp_context(struct sigcontext *sc)
1270 ? _save_fp_context(sc)
1271 : fpu_emulator_save_context(sc);
1274 static int smp_restore_fp_context(struct sigcontext *sc)
1277 ? _restore_fp_context(sc)
1278 : fpu_emulator_restore_context(sc);
1282 static inline void signal_init(void)
1285 /* For now just do the cpu_has_fpu check when the functions are invoked */
1286 save_fp_context = smp_save_fp_context;
1287 restore_fp_context = smp_restore_fp_context;
1290 save_fp_context = _save_fp_context;
1291 restore_fp_context = _restore_fp_context;
1293 save_fp_context = fpu_emulator_save_context;
1294 restore_fp_context = fpu_emulator_restore_context;
1299 #ifdef CONFIG_MIPS32_COMPAT
1302 * This is used by 32-bit signal stuff on the 64-bit kernel
1304 asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
1305 asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
1307 extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
1308 extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
1310 extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
1311 extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
1313 static inline void signal32_init(void)
1316 save_fp_context32 = _save_fp_context32;
1317 restore_fp_context32 = _restore_fp_context32;
1319 save_fp_context32 = fpu_emulator_save_context32;
1320 restore_fp_context32 = fpu_emulator_restore_context32;
1325 extern void cpu_cache_init(void);
1326 extern void tlb_init(void);
1327 extern void flush_tlb_handlers(void);
1329 void __init per_cpu_trap_init(void)
1331 unsigned int cpu = smp_processor_id();
1332 unsigned int status_set = ST0_CU0;
1333 #ifdef CONFIG_MIPS_MT_SMTC
1334 int secondaryTC = 0;
1335 int bootTC = (cpu == 0);
1338 * Only do per_cpu_trap_init() for the first TC of each VPE.
1339 * Note that this hack assumes that the SMTC init code
1340 * assigns TCs consecutively and in ascending order.
1343 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1344 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1346 #endif /* CONFIG_MIPS_MT_SMTC */
1349 * Disable coprocessors and select 32-bit or 64-bit addressing
1350 * and the 16/32 or 32/32 FPR register model. Reset the BEV
1351 * flag that some firmware may have left set and the TS bit (for
1352 * IP27). Set XX for ISA IV code to work.
1355 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1357 if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
1358 status_set |= ST0_XX;
1359 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1363 set_c0_status(ST0_MX);
1365 #ifdef CONFIG_CPU_MIPSR2
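/*
 * The low four HWREna bits enable the architecturally defined hardware
 * registers 0-3 (CPUNum, SYNCI_Step, CC, CCRes), so 0x0000000f lets user
 * space read all of them directly with rdhwr.
 */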
1366 write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
1369 #ifdef CONFIG_MIPS_MT_SMTC
1371 #endif /* CONFIG_MIPS_MT_SMTC */
1374 * Interrupt handling.
1376 if (cpu_has_veic || cpu_has_vint) {
1377 write_c0_ebase (ebase);
1378 /* Setting vector spacing enables EI/VI mode */
1379 change_c0_intctl (0x3e0, VECTORSPACING);
1381 if (cpu_has_divec) {
1382 if (cpu_has_mipsmt) {
1383 unsigned int vpflags = dvpe();
1384 set_c0_cause(CAUSEF_IV);
1387 set_c0_cause(CAUSEF_IV);
1389 #ifdef CONFIG_MIPS_MT_SMTC
1391 #endif /* CONFIG_MIPS_MT_SMTC */
1393 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1394 TLBMISS_HANDLER_SETUP();
1396 atomic_inc(&init_mm.mm_count);
1397 current->active_mm = &init_mm;
1398 BUG_ON(current->mm);
1399 enter_lazy_tlb(&init_mm, current);
1401 #ifdef CONFIG_MIPS_MT_SMTC
1403 #endif /* CONFIG_MIPS_MT_SMTC */
1406 #ifdef CONFIG_MIPS_MT_SMTC
1408 #endif /* CONFIG_MIPS_MT_SMTC */
1411 /* Install CPU exception handler */
1412 void __init set_handler (unsigned long offset, void *addr, unsigned long size)
1414 memcpy((void *)(ebase + offset), addr, size);
1415 flush_icache_range(ebase + offset, ebase + offset + size);
1418 /* Install uncached CPU exception handler */
1419 void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
1422 unsigned long uncached_ebase = KSEG1ADDR(ebase);
1425 unsigned long uncached_ebase = TO_UNCAC(ebase);
1428 memcpy((void *)(uncached_ebase + offset), addr, size);
1431 static int __initdata rdhwr_noopt;
1432 static int __init set_rdhwr_noopt(char *str)
1438 __setup("rdhwr_noopt", set_rdhwr_noopt);
1440 void __init trap_init(void)
1442 extern char except_vec3_generic, except_vec3_r4000;
1443 extern char except_vec4;
1446 if (cpu_has_veic || cpu_has_vint)
1447 ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
1453 per_cpu_trap_init();
1456 * Copy the generic exception handlers to their final destination.
1457 * This will be overridden later as suitable for a particular
1460 set_handler(0x180, &except_vec3_generic, 0x80);
1463 * Setup default vectors
1465 for (i = 0; i <= 31; i++)
1466 set_except_vector(i, handle_reserved);
1469 * Copy the EJTAG debug exception vector handler code to its final
1472 if (cpu_has_ejtag && board_ejtag_handler_setup)
1473 board_ejtag_handler_setup ();
1476 * Only some CPUs have the watch exceptions.
1479 set_except_vector(23, handle_watch);
1482 * Initialise interrupt handlers
1484 if (cpu_has_veic || cpu_has_vint) {
1485 int nvec = cpu_has_veic ? 64 : 8;
1486 for (i = 0; i < nvec; i++)
1487 set_vi_handler(i, NULL);
1489 else if (cpu_has_divec)
1490 set_handler(0x200, &except_vec4, 0x8);
1493 * Some CPUs can enable/disable cache parity detection, but do
1494 * it in different ways.
1496 parity_protection_init();
1499 * The Data Bus Errors / Instruction Bus Errors are signaled
1500 * by external hardware. Therefore these two exceptions
1501 * may have board specific handlers.
1506 set_except_vector(0, handle_int);
1507 set_except_vector(1, handle_tlbm);
1508 set_except_vector(2, handle_tlbl);
1509 set_except_vector(3, handle_tlbs);
1511 set_except_vector(4, handle_adel);
1512 set_except_vector(5, handle_ades);
1514 set_except_vector(6, handle_ibe);
1515 set_except_vector(7, handle_dbe);
1517 set_except_vector(8, handle_sys);
1518 set_except_vector(9, handle_bp);
1519 set_except_vector(10, rdhwr_noopt ? handle_ri :
1520 (cpu_has_vtag_icache ?
1521 handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1522 set_except_vector(11, handle_cpu);
1523 set_except_vector(12, handle_ov);
1524 set_except_vector(13, handle_tr);
1526 if (current_cpu_data.cputype == CPU_R6000 ||
1527 current_cpu_data.cputype == CPU_R6000A) {
1529 * The R6000 is the only R-series CPU that features a machine
1530 * check exception (similar to the R4000 cache error) and
1531 * unaligned ldc1/sdc1 exception. The handlers have not been
1532 * written yet. Well, anyway there is no R6000 machine on the
1533 * current list of targets for Linux/MIPS.
1534 * (Duh, crap, there is someone with a triple R6k machine)
1536 //set_except_vector(14, handle_mc);
1537 //set_except_vector(15, handle_ndc);
1541 if (board_nmi_handler_setup)
1542 board_nmi_handler_setup();
1544 if (cpu_has_fpu && !cpu_has_nofpuex)
1545 set_except_vector(15, handle_fpe);
1547 set_except_vector(22, handle_mdmx);
1550 set_except_vector(24, handle_mcheck);
1553 set_except_vector(25, handle_mt);
1556 set_except_vector(26, handle_dsp);
1559 /* Special exception: R4[04]00 also uses the divec space. */
1560 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
1561 else if (cpu_has_4kex)
1562 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
1564 memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
1567 #ifdef CONFIG_MIPS32_COMPAT
1571 flush_icache_range(ebase, ebase + 0x400);
1572 flush_tlb_handlers();