1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
9 * I like traps on v9, :))))
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/sched.h> /* for jiffies */
15 #include <linux/kernel.h>
16 #include <linux/kallsyms.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
21 #include <linux/init.h>
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/sfafsr.h>
37 #include <asm/psrcompat.h>
38 #include <asm/processor.h>
39 #include <asm/timer.h>
40 #include <asm/kdebug.h>
42 #include <linux/kmod.h>
/* Head of the sparc64 die-notifier chain; callbacks registered here are
 * invoked via notify_die() on fatal traps.  Registration is serialized
 * by die_notifier_lock.
 */
45 struct notifier_block *sparc64die_chain;
46 static DEFINE_SPINLOCK(die_notifier_lock);
/* Register @nb on the sparc64 die notifier chain.  Takes the spinlock
 * with IRQs disabled so registration is safe from any context.
 * NOTE(review): declarations of 'flags'/'err' and the return statement
 * are not visible in this view of the file — presumably returns 'err'.
 */
48 int register_die_notifier(struct notifier_block *nb)
52 spin_lock_irqsave(&die_notifier_lock, flags);
53 err = notifier_chain_register(&sparc64die_chain, nb);
54 spin_unlock_irqrestore(&die_notifier_lock, flags);
58 /* When an irrecoverable trap occurs at tl > 0, the trap entry
59 * code logs the trap state registers at every level in the trap
60 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
/* Print the saved trap-stack entries (TSTATE/TPC/TNPC/TT per level)
 * logged by the trap entry code for a tl>0 fault.
 * NOTE(review): the printk string says "track stack" — looks like a
 * typo for "trap stack"; being a runtime string it is left untouched
 * here.  Loop iterates over 4 trap levels, matching the visible
 * trapstack[] accesses.
 */
73 static void dump_tl1_traplog(struct tl1_traplog *p)
77 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
79 for (i = 0; i < 4; i++) {
81 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
82 "TNPC[%016lx] TT[%lx]\n",
84 p->trapstack[i].tstate, p->trapstack[i].tpc,
85 p->trapstack[i].tnpc, p->trapstack[i].tt);
/* Entry point for the debug-call trap: simply forwards to the die
 * notifier chain as a DIE_CALL event with SIGINT.
 */
89 void do_call_debug(struct pt_regs *regs)
91 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
/* Handle an unexpected trap taken at trap-level 0.
 * - Gives die-chain listeners first refusal (NOTIFY_STOP aborts).
 * - Dies outright for bad *hardware* traps and for bad software traps
 *   taken in privileged (kernel) mode.
 * - Otherwise delivers SIGILL/ILL_ILLTRP to the current user task,
 *   truncating tpc/tnpc to 32 bits for compat (TIF_32BIT) tasks.
 * NOTE(review): the branch choosing the "Bad hw trap" path and the
 * siginfo declaration are not visible in this view of the file.
 */
94 void bad_trap(struct pt_regs *regs, long lvl)
99 if (notify_die(DIE_TRAP, "bad trap", regs,
100 0, lvl, SIGTRAP) == NOTIFY_STOP)
104 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
105 die_if_kernel(buffer, regs);
109 if (regs->tstate & TSTATE_PRIV) {
110 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
111 die_if_kernel(buffer, regs);
113 if (test_thread_flag(TIF_32BIT)) {
114 regs->tpc &= 0xffffffff;
115 regs->tnpc &= 0xffffffff;
117 info.si_signo = SIGILL;
119 info.si_code = ILL_ILLTRP;
120 info.si_addr = (void __user *)regs->tpc;
121 info.si_trapno = lvl;
122 force_sig_info(SIGILL, &info, current);
/* Handle an unexpected trap taken at trap-level > 0: notify listeners,
 * dump the trap-level-1 traplog saved just past pt_regs, then die —
 * there is no recovery path for tl>0 surprises.
 */
125 void bad_trap_tl1(struct pt_regs *regs, long lvl)
129 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
130 0, lvl, SIGTRAP) == NOTIFY_STOP)
133 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
135 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
136 die_if_kernel (buffer, regs);
139 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() support: report the source location of the BUG site.
 * Only compiled in with CONFIG_DEBUG_BUGVERBOSE.
 */
140 void do_BUG(const char *file, int line)
143 printk("kernel BUG at %s:%d!\n", file, line);
/* Spitfire instruction-access exception (trap 0x8).
 * Kernel-mode faults are fatal (log SFSR/SFAR and die); user-mode
 * faults are turned into SIGSEGV/SEGV_MAPERR at the faulting tpc,
 * with tpc/tnpc truncated for 32-bit compat tasks.
 */
147 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
151 if (notify_die(DIE_TRAP, "instruction access exception", regs,
152 0, 0x8, SIGTRAP) == NOTIFY_STOP)
155 if (regs->tstate & TSTATE_PRIV) {
156 printk("spitfire_insn_access_exception: SFSR[%016lx] "
157 "SFAR[%016lx], going.\n", sfsr, sfar);
158 die_if_kernel("Iax", regs);
160 if (test_thread_flag(TIF_32BIT)) {
161 regs->tpc &= 0xffffffff;
162 regs->tnpc &= 0xffffffff;
164 info.si_signo = SIGSEGV;
166 info.si_code = SEGV_MAPERR;
167 info.si_addr = (void __user *)regs->tpc;
169 force_sig_info(SIGSEGV, &info, current);
/* tl>0 variant of the instruction-access exception handler: dump the
 * traplog, then fall through to the tl0 handler above.
 */
172 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
174 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
175 0, 0x8, SIGTRAP) == NOTIFY_STOP)
178 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
179 spitfire_insn_access_exception(regs, sfsr, sfar);
/* Spitfire data-access exception (trap 0x30).
 * For kernel-mode faults, first consult the exception tables so that
 * uaccess fixups (copy_{to,from}_user etc.) can recover by redirecting
 * tpc to the fixup stub; unrecoverable kernel faults die.  User-mode
 * faults become SIGSEGV/SEGV_MAPERR at the faulting address (sfar).
 */
182 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
186 if (notify_die(DIE_TRAP, "data access exception", regs,
187 0, 0x30, SIGTRAP) == NOTIFY_STOP)
190 if (regs->tstate & TSTATE_PRIV) {
191 /* Test if this comes from uaccess places. */
192 const struct exception_table_entry *entry;
194 entry = search_exception_tables(regs->tpc);
196 /* Ouch, somebody is trying VM hole tricks on us... */
197 #ifdef DEBUG_EXCEPTIONS
198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
199 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
200 regs->tpc, entry->fixup);
/* Redirect execution to the fixup stub; tnpc follows tpc. */
202 regs->tpc = entry->fixup;
203 regs->tnpc = regs->tpc + 4;
207 printk("spitfire_data_access_exception: SFSR[%016lx] "
208 "SFAR[%016lx], going.\n", sfsr, sfar);
209 die_if_kernel("Dax", regs);
212 info.si_signo = SIGSEGV;
214 info.si_code = SEGV_MAPERR;
215 info.si_addr = (void __user *)sfar;
217 force_sig_info(SIGSEGV, &info, current);
/* tl>0 variant of the data-access exception handler: dump the traplog,
 * then fall through to the tl0 handler above.
 */
220 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
222 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
223 0, 0x30, SIGTRAP) == NOTIFY_STOP)
226 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
227 spitfire_data_access_exception(regs, sfsr, sfar);
231 /* This is really pathetic... */
/* Shared with the PCI poke machinery: lets the access-error handler
 * recognize faults deliberately provoked by PCI config-space pokes.
 */
232 extern volatile int pci_poke_in_progress;
233 extern volatile int pci_poke_cpu;
234 extern volatile int pci_poke_faulted;
237 /* When access exceptions happen, we must do this. */
/* Invalidate every I-cache and D-cache tag, then re-enable both caches
 * (and their parity checking) in the LSU control register.  Only
 * meaningful on Spitfire-class chips, hence the tlb_type guard.
 * NOTE(review): loop step of 32 presumably matches the L1 line/tag
 * granularity on Spitfire — confirm against the chip manual.
 */
238 static void spitfire_clean_and_reenable_l1_caches(void)
242 if (tlb_type != spitfire)
246 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
247 spitfire_put_icache_tag(va, 0x0);
248 spitfire_put_dcache_tag(va, 0x0);
251 /* Re-enable in LSU. */
252 __asm__ __volatile__("flush %%g6\n\t"
254 "stxa %0, [%%g0] %1\n\t"
257 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
258 LSU_CONTROL_IM | LSU_CONTROL_DM),
259 "i" (ASI_LSU_CONTROL)
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to the
 * ESTATE error-enable register (ASI store).
 */
263 static void spitfire_enable_estate_errors(void)
265 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
268 : "r" (ESTATE_ERR_ALL),
269 "i" (ASI_ESTATE_ERROR_EN));
/* 256-entry map from a UDB ECC syndrome byte to a syndrome code passed
 * to prom_getunumber() (see spitfire_log_udb_syndrome below).
 * NOTE(review): encoding of the 0x40+ entries (multi-bit/invalid
 * classes vs. single-bit positions) is not evident from this view —
 * confirm against the UltraSPARC ECC documentation before editing.
 */
272 static char ecc_syndrome_table[] = {
273 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
274 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
275 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
276 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
277 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
278 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
279 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
280 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
281 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
282 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
283 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
284 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
285 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
286 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
287 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
288 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
289 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
290 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
291 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
292 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
293 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
294 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
295 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
296 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
297 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
298 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
299 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
300 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
301 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
302 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
303 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
304 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
/* Fallback module name when the PROM cannot resolve a unumber. */
307 static char *syndrome_unknown = "<Unknown>";
/* Translate the low/high UDB error syndromes into memory-module names
 * via the PROM (prom_getunumber) and log one warning line per UDB half.
 * @bit selects which error bit (UDBE_CE/UDBE_UE) the caller cares
 * about; the guards testing it against udbl/udbh are not visible in
 * this view of the file.
 */
309 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
311 unsigned short scode;
312 char memmod_str[64], *p;
315 scode = ecc_syndrome_table[udbl & 0xff];
316 if (prom_getunumber(scode, afar,
317 memmod_str, sizeof(memmod_str)) == -1)
318 p = syndrome_unknown;
321 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
322 "Memory Module \"%s\"\n",
323 smp_processor_id(), scode, p);
/* Same lookup and report for the high UDB half. */
327 scode = ecc_syndrome_table[udbh & 0xff];
328 if (prom_getunumber(scode, afar,
329 memmod_str, sizeof(memmod_str)) == -1)
330 p = syndrome_unknown;
333 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
334 "Memory Module \"%s\"\n",
335 smp_processor_id(), scode, p);
/* Log a correctable ECC error (CEE): print the raw AFSR/AFAR/UDB
 * state, decode the syndrome, notify listeners, and re-enable E-state
 * error reporting.  CEE does not disable the L1 caches, so no cache
 * cleaning is needed here (unlike the UE path).
 */
340 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
343 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
344 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
345 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
347 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
349 /* We always log it, even if someone is listening for this
352 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
353 0, TRAP_TYPE_CEE, SIGTRAP);
355 /* The Correctable ECC Error trap does not disable I/D caches. So
356 * we only have to restore the ESTATE Error Enable register.
358 spitfire_enable_estate_errors();
/* Log an uncorrectable error (UE).  Kernel-context UEs are fatal
 * (traplog dump + die); user-context UEs clean/re-enable the L1
 * caches and E-state reporting, then deliver SIGBUS/BUS_OBJERR.
 * NOTE(review): the format string prints UDBH with %ld while every
 * other site in this file uses %lx for UDB values — looks like a
 * format-specifier bug worth fixing (udbh is unsigned long).
 */
361 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
365 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
366 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
367 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
369 /* XXX add more human friendly logging of the error status
370 * XXX as is implemented for cheetah
373 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
375 /* We always log it, even if someone is listening for this
378 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
381 if (regs->tstate & TSTATE_PRIV) {
383 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
384 die_if_kernel("UE", regs);
387 /* XXX need more intelligent processing here, such as is implemented
388 * XXX for cheetah errors, in fact if the E-cache still holds the
389 * XXX line with bad parity this will loop
392 spitfire_clean_and_reenable_l1_caches();
393 spitfire_enable_estate_errors();
395 if (test_thread_flag(TIF_32BIT)) {
396 regs->tpc &= 0xffffffff;
397 regs->tnpc &= 0xffffffff;
399 info.si_signo = SIGBUS;
401 info.si_code = BUS_OBJERR;
402 info.si_addr = (void *)0;
404 force_sig_info(SIGBUS, &info, current);
/* Top-level Spitfire access-error trap handler.
 * Unpacks the status word (AFSR, trap type, tl>0 flag, UDB low/high),
 * short-circuits faults caused by an in-progress PCI config poke on
 * this CPU (marks the poke faulted and skips the instruction), then
 * dispatches to the UE and/or CEE loggers.  The CEE branch also ACKs
 * any leftover CE state in the UDB error registers when the trap was
 * initially ACK'd only for UE.
 */
407 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
409 unsigned long afsr, tt, udbh, udbl;
412 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
413 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
414 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
415 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
416 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
/* PCI poke in flight on this CPU: expected fault, recover silently. */
419 if (tt == TRAP_TYPE_DAE &&
420 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
421 spitfire_clean_and_reenable_l1_caches();
422 spitfire_enable_estate_errors();
424 pci_poke_faulted = 1;
425 regs->tnpc = regs->tpc + 4;
430 if (afsr & SFAFSR_UE)
431 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
433 if (tt == TRAP_TYPE_CEE) {
434 /* Handle the case where we took a CEE trap, but ACK'd
435 * only the UE state in the UDB error registers.
437 if (afsr & SFAFSR_UE) {
438 if (udbh & UDBE_CE) {
439 __asm__ __volatile__(
440 "stxa %0, [%1] %2\n\t"
443 : "r" (udbh & UDBE_CE),
444 "r" (0x0), "i" (ASI_UDB_ERROR_W));
446 if (udbl & UDBE_CE) {
447 __asm__ __volatile__(
448 "stxa %0, [%1] %2\n\t"
451 : "r" (udbl & UDBE_CE),
452 "r" (0x18), "i" (ASI_UDB_ERROR_W));
456 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
/* Non-zero when the P-cache has been forced on from the command line
 * or by cheetah_enable_pcache() below.
 */
460 int cheetah_pcache_forced_on;
/* Turn on the Cheetah prefetch cache: read the DCU control register,
 * OR in the P-cache enable bits (PE/HPE/SPE/SL), and write it back.
 */
462 void cheetah_enable_pcache(void)
466 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
469 __asm__ __volatile__("ldxa [%%g0] %1, %0"
471 : "i" (ASI_DCU_CONTROL_REG));
472 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
473 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
476 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
479 /* Cheetah error trap handling. */
/* E-cache flush window parameters, computed once at boot by
 * cheetah_ecache_flush_init() and used by the flush helpers below.
 */
480 static unsigned long ecache_flush_physbase;
481 static unsigned long ecache_flush_linesize;
482 static unsigned long ecache_flush_size;
484 /* WARNING: The error trap handlers in assembly know the precise
485 * layout of the following structure.
487 * C-level handlers below use this information to log the error
488 * and then determine how to recover (if possible).
/* Per-CPU error snapshot filled in by the assembly trap handlers.
 * Field offsets (the hex comments) are part of the asm ABI — do not
 * reorder or resize without updating the assembly.
 */
490 struct cheetah_err_info {
495 /*0x10*/u64 dcache_data[4]; /* The actual data */
496 /*0x30*/u64 dcache_index; /* D-cache index */
497 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
498 /*0x40*/u64 dcache_utag; /* D-cache microtag */
499 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
502 /*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
503 /*0x90*/u64 icache_index; /* I-cache index */
504 /*0x98*/u64 icache_tag; /* I-cache phys tag */
505 /*0xa0*/u64 icache_utag; /* I-cache microtag */
506 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
507 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
508 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
511 /*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
512 /*0xe0*/u64 ecache_index; /* E-cache index */
513 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
515 /*0xf0*/u64 __pad[32 - 30];
/* Sentinel AFSR value marking an unused/consumed log slot. */
517 #define CHAFSR_INVALID ((u64)-1L)
519 /* This table is ordered in priority of errors and matches the
520 * AFAR overwrite policy as well.
/* (mask, message) pair used by cheetah_get_hipri()/cheetah_get_string()
 * to map AFSR bits to human-readable descriptions.
 * NOTE(review): the member declarations are not visible in this view;
 * usage elsewhere shows fields named .mask and .name.
 */
523 struct afsr_error_table {
528 static const char CHAFSR_PERR_msg[] =
529 "System interface protocol error";
530 static const char CHAFSR_IERR_msg[] =
531 "Internal processor error";
/* NOTE(review): "addresss" below is a typo in the runtime string
 * ("address"); left untouched here since this edit changes only
 * comments.
 */
532 static const char CHAFSR_ISAP_msg[] =
533 "System request parity error on incoming addresss";
534 static const char CHAFSR_UCU_msg[] =
535 "Uncorrectable E-cache ECC error for ifetch/data";
536 static const char CHAFSR_UCC_msg[] =
537 "SW Correctable E-cache ECC error for ifetch/data";
538 static const char CHAFSR_UE_msg[] =
539 "Uncorrectable system bus data ECC error for read";
540 static const char CHAFSR_EDU_msg[] =
541 "Uncorrectable E-cache ECC error for stmerge/blkld";
542 static const char CHAFSR_EMU_msg[] =
543 "Uncorrectable system bus MTAG error";
544 static const char CHAFSR_WDU_msg[] =
545 "Uncorrectable E-cache ECC error for writeback";
546 static const char CHAFSR_CPU_msg[] =
547 "Uncorrectable ECC error for copyout";
548 static const char CHAFSR_CE_msg[] =
549 "HW corrected system bus data ECC error for read";
550 static const char CHAFSR_EDC_msg[] =
551 "HW corrected E-cache ECC error for stmerge/blkld";
552 static const char CHAFSR_EMC_msg[] =
553 "HW corrected system bus MTAG ECC error";
554 static const char CHAFSR_WDC_msg[] =
555 "HW corrected E-cache ECC error for writeback";
556 static const char CHAFSR_CPC_msg[] =
557 "HW corrected ECC error for copyout";
558 static const char CHAFSR_TO_msg[] =
559 "Unmapped error from system bus";
560 static const char CHAFSR_BERR_msg[] =
561 "Bus error response from system bus";
562 static const char CHAFSR_IVC_msg[] =
563 "HW corrected system bus data ECC error for ivec read";
564 static const char CHAFSR_IVU_msg[] =
565 "Uncorrectable system bus data ECC error for ivec read";
/* Plain Cheetah AFSR decode table, highest priority first. */
566 static struct afsr_error_table __cheetah_error_table[] = {
567 { CHAFSR_PERR, CHAFSR_PERR_msg },
568 { CHAFSR_IERR, CHAFSR_IERR_msg },
569 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
570 { CHAFSR_UCU, CHAFSR_UCU_msg },
571 { CHAFSR_UCC, CHAFSR_UCC_msg },
572 { CHAFSR_UE, CHAFSR_UE_msg },
573 { CHAFSR_EDU, CHAFSR_EDU_msg },
574 { CHAFSR_EMU, CHAFSR_EMU_msg },
575 { CHAFSR_WDU, CHAFSR_WDU_msg },
576 { CHAFSR_CPU, CHAFSR_CPU_msg },
577 { CHAFSR_CE, CHAFSR_CE_msg },
578 { CHAFSR_EDC, CHAFSR_EDC_msg },
579 { CHAFSR_EMC, CHAFSR_EMC_msg },
580 { CHAFSR_WDC, CHAFSR_WDC_msg },
581 { CHAFSR_CPC, CHAFSR_CPC_msg },
582 { CHAFSR_TO, CHAFSR_TO_msg },
583 { CHAFSR_BERR, CHAFSR_BERR_msg },
584 /* These two do not update the AFAR. */
585 { CHAFSR_IVC, CHAFSR_IVC_msg },
586 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Cheetah+ additions to the error vocabulary. */
589 static const char CHPAFSR_DTO_msg[] =
590 "System bus unmapped error for prefetch/storequeue-read";
591 static const char CHPAFSR_DBERR_msg[] =
592 "System bus error for prefetch/storequeue-read";
593 static const char CHPAFSR_THCE_msg[] =
594 "Hardware corrected E-cache Tag ECC error";
595 static const char CHPAFSR_TSCE_msg[] =
596 "SW handled correctable E-cache Tag ECC error";
597 static const char CHPAFSR_TUE_msg[] =
598 "Uncorrectable E-cache Tag ECC error";
599 static const char CHPAFSR_DUE_msg[] =
600 "System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Cheetah+ AFSR decode table: the base Cheetah entries plus the
 * Cheetah+-specific bits, highest priority first.
 */
601 static struct afsr_error_table __cheetah_plus_error_table[] = {
602 { CHAFSR_PERR, CHAFSR_PERR_msg },
603 { CHAFSR_IERR, CHAFSR_IERR_msg },
604 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
605 { CHAFSR_UCU, CHAFSR_UCU_msg },
606 { CHAFSR_UCC, CHAFSR_UCC_msg },
607 { CHAFSR_UE, CHAFSR_UE_msg },
608 { CHAFSR_EDU, CHAFSR_EDU_msg },
609 { CHAFSR_EMU, CHAFSR_EMU_msg },
610 { CHAFSR_WDU, CHAFSR_WDU_msg },
611 { CHAFSR_CPU, CHAFSR_CPU_msg },
612 { CHAFSR_CE, CHAFSR_CE_msg },
613 { CHAFSR_EDC, CHAFSR_EDC_msg },
614 { CHAFSR_EMC, CHAFSR_EMC_msg },
615 { CHAFSR_WDC, CHAFSR_WDC_msg },
616 { CHAFSR_CPC, CHAFSR_CPC_msg },
617 { CHAFSR_TO, CHAFSR_TO_msg },
618 { CHAFSR_BERR, CHAFSR_BERR_msg },
619 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
620 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
621 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
622 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
623 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
624 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
625 /* These two do not update the AFAR. */
626 { CHAFSR_IVC, CHAFSR_IVC_msg },
627 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Jalapeno (JBUS) additions to the error vocabulary. */
630 static const char JPAFSR_JETO_msg[] =
631 "System interface protocol error, hw timeout caused";
632 static const char JPAFSR_SCE_msg[] =
633 "Parity error on system snoop results";
634 static const char JPAFSR_JEIC_msg[] =
635 "System interface protocol error, illegal command detected";
636 static const char JPAFSR_JEIT_msg[] =
637 "System interface protocol error, illegal ADTYPE detected";
638 static const char JPAFSR_OM_msg[] =
639 "Out of range memory error has occurred";
640 static const char JPAFSR_ETP_msg[] =
641 "Parity error on L2 cache tag SRAM";
642 static const char JPAFSR_UMS_msg[] =
643 "Error due to unsupported store";
644 static const char JPAFSR_RUE_msg[] =
645 "Uncorrectable ECC error from remote cache/memory";
646 static const char JPAFSR_RCE_msg[] =
647 "Correctable ECC error from remote cache/memory";
648 static const char JPAFSR_BP_msg[] =
649 "JBUS parity error on returned read data";
650 static const char JPAFSR_WBP_msg[] =
651 "JBUS parity error on data for writeback or block store";
652 static const char JPAFSR_FRC_msg[] =
653 "Foreign read to DRAM incurring correctable ECC error";
654 static const char JPAFSR_FRU_msg[] =
655 "Foreign read to DRAM incurring uncorrectable ECC error";
/* Jalapeno AFSR decode table, highest priority first; mixes shared
 * Cheetah bits with the JBUS-specific ones.
 */
656 static struct afsr_error_table __jalapeno_error_table[] = {
657 { JPAFSR_JETO, JPAFSR_JETO_msg },
658 { JPAFSR_SCE, JPAFSR_SCE_msg },
659 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
660 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
661 { CHAFSR_PERR, CHAFSR_PERR_msg },
662 { CHAFSR_IERR, CHAFSR_IERR_msg },
663 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
664 { CHAFSR_UCU, CHAFSR_UCU_msg },
665 { CHAFSR_UCC, CHAFSR_UCC_msg },
666 { CHAFSR_UE, CHAFSR_UE_msg },
667 { CHAFSR_EDU, CHAFSR_EDU_msg },
668 { JPAFSR_OM, JPAFSR_OM_msg },
669 { CHAFSR_WDU, CHAFSR_WDU_msg },
670 { CHAFSR_CPU, CHAFSR_CPU_msg },
671 { CHAFSR_CE, CHAFSR_CE_msg },
672 { CHAFSR_EDC, CHAFSR_EDC_msg },
673 { JPAFSR_ETP, JPAFSR_ETP_msg },
674 { CHAFSR_WDC, CHAFSR_WDC_msg },
675 { CHAFSR_CPC, CHAFSR_CPC_msg },
676 { CHAFSR_TO, CHAFSR_TO_msg },
677 { CHAFSR_BERR, CHAFSR_BERR_msg },
678 { JPAFSR_UMS, JPAFSR_UMS_msg },
679 { JPAFSR_RUE, JPAFSR_RUE_msg },
680 { JPAFSR_RCE, JPAFSR_RCE_msg },
681 { JPAFSR_BP, JPAFSR_BP_msg },
682 { JPAFSR_WBP, JPAFSR_WBP_msg },
683 { JPAFSR_FRC, JPAFSR_FRC_msg },
684 { JPAFSR_FRU, JPAFSR_FRU_msg },
685 /* These two do not update the AFAR. */
686 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Chip-appropriate decode table and the union of its AFSR error bits,
 * both selected at boot by cheetah_ecache_flush_init().
 */
689 static struct afsr_error_table *cheetah_error_table;
690 static unsigned long cheetah_afsr_errors;
692 /* This is allocated at boot time based upon the largest hardware
693 * cpu ID in the system. We allocate two entries per cpu, one for
694 * TL==0 logging and one for TL >= 1 logging.
696 struct cheetah_err_info *cheetah_error_log;
/* Return this CPU's log slot for @afsr: slot 0 of the pair for TL==0
 * errors, the adjacent slot for TL>=1 (CHAFSR_TL1 set).  NULL if the
 * scoreboard has not been allocated yet.
 */
698 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
700 struct cheetah_err_info *p;
701 int cpu = smp_processor_id();
703 if (!cheetah_error_log)
706 p = cheetah_error_log + (cpu * 2);
707 if ((afsr & CHAFSR_TL1) != 0UL)
/* Trap-table slots (tl0_*/tl1_*) and the replacement handler stubs
 * (cheetah_*_trap_vector*) patched into them by
 * cheetah_ecache_flush_init() below.  All are defined in assembly.
 */
713 extern unsigned int tl0_icpe[], tl1_icpe[];
714 extern unsigned int tl0_dcpe[], tl1_dcpe[];
715 extern unsigned int tl0_fecc[], tl1_fecc[];
716 extern unsigned int tl0_cee[], tl1_cee[];
717 extern unsigned int tl0_iae[], tl1_iae[];
718 extern unsigned int tl0_dae[], tl1_dae[];
719 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
720 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
721 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
722 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
723 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
/* Boot-time setup of Cheetah error handling:
 * 1) probe all CPU nodes for the largest E-cache size and smallest
 *    E-cache line size, and derive the flush-window parameters;
 * 2) allocate and invalidate the per-CPU error-log scoreboard
 *    (two cheetah_err_info slots per CPU);
 * 3) pick the decode table from the CPU version register
 *    (0x003e0016 = Jalapeno, 0x003e0015 = Cheetah+, else Cheetah);
 * 4) patch the error trap vectors (8 instructions of 4 bytes each)
 *    into the trap table, including the Cheetah+-only parity vectors.
 */
725 void __init cheetah_ecache_flush_init(void)
727 unsigned long largest_size, smallest_linesize, order, ver;
728 int node, i, instance;
730 /* Scan all cpu device tree nodes, note two values:
731 * 1) largest E-cache size
732 * 2) smallest E-cache line size
735 smallest_linesize = ~0UL;
738 while (!cpu_find_by_instance(instance, &node, NULL)) {
741 val = prom_getintdefault(node, "ecache-size",
743 if (val > largest_size)
745 val = prom_getintdefault(node, "ecache-line-size", 64);
746 if (val < smallest_linesize)
747 smallest_linesize = val;
751 if (largest_size == 0UL || smallest_linesize == ~0UL) {
752 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
/* Flush span must be twice the E-cache size (displacement flush). */
757 ecache_flush_size = (2 * largest_size);
758 ecache_flush_linesize = smallest_linesize;
760 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
762 if (ecache_flush_physbase == ~0UL) {
763 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
764 "contiguous physical memory.\n",
769 /* Now allocate error trap reporting scoreboard. */
770 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
771 for (order = 0; order < MAX_ORDER; order++) {
772 if ((PAGE_SIZE << order) >= node)
775 cheetah_error_log = (struct cheetah_err_info *)
776 __get_free_pages(GFP_KERNEL, order);
777 if (!cheetah_error_log) {
778 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
779 "error logging scoreboard (%d bytes).\n", node);
782 memset(cheetah_error_log, 0, PAGE_SIZE << order);
784 /* Mark all AFSRs as invalid so that the trap handler will
785 * log new information there.
787 for (i = 0; i < 2 * NR_CPUS; i++)
788 cheetah_error_log[i].afsr = CHAFSR_INVALID;
790 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
791 if ((ver >> 32) == 0x003e0016) {
792 cheetah_error_table = &__jalapeno_error_table[0];
793 cheetah_afsr_errors = JPAFSR_ERRORS;
794 } else if ((ver >> 32) == 0x003e0015) {
795 cheetah_error_table = &__cheetah_plus_error_table[0];
796 cheetah_afsr_errors = CHPAFSR_ERRORS;
798 cheetah_error_table = &__cheetah_error_table[0];
799 cheetah_afsr_errors = CHAFSR_ERRORS;
802 /* Now patch trap tables. */
803 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
804 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
805 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
806 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
807 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
808 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
809 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
810 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
811 if (tlb_type == cheetah_plus) {
812 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
813 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
814 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
815 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
/* Displacement-flush the whole E-cache: walk the flush window backward
 * one line at a time with cache-allocating physical loads
 * (ASI_PHYS_USE_EC) until the counter reaches zero.
 */
820 static void cheetah_flush_ecache(void)
822 unsigned long flush_base = ecache_flush_physbase;
823 unsigned long flush_linesize = ecache_flush_linesize;
824 unsigned long flush_size = ecache_flush_size;
826 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
827 " bne,pt %%xcc, 1b\n\t"
828 " ldxa [%2 + %0] %3, %%g0\n\t"
830 : "0" (flush_size), "r" (flush_base),
831 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
/* Displacement-flush the single E-cache line holding @physaddr: load
 * from the two flush-window addresses (half-size apart) that alias the
 * same E-cache index, after aligning the address to 8 bytes.
 */
834 static void cheetah_flush_ecache_line(unsigned long physaddr)
838 physaddr &= ~(8UL - 1UL);
839 physaddr = (ecache_flush_physbase +
840 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
841 alias = physaddr + (ecache_flush_size >> 1UL);
842 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
843 "ldxa [%1] %2, %%g0\n\t"
846 : "r" (physaddr), "r" (alias),
847 "i" (ASI_PHYS_USE_EC));
850 /* Unfortunately, the diagnostic access to the I-cache tags we need to
851 * use to clear the thing interferes with I-cache coherency transactions.
853 * So we must only flush the I-cache when it is disabled.
/* Clear the valid bit of every I-cache tag via diagnostic ASI stores.
 * Caller (cheetah_flush_icache) must have disabled the I-cache first —
 * see the coherency warning above.
 */
855 static void __cheetah_flush_icache(void)
857 unsigned int icache_size, icache_line_size;
860 icache_size = local_cpu_data().icache_size;
861 icache_line_size = local_cpu_data().icache_line_size;
863 /* Clear the valid bits in all the tags. */
864 for (addr = 0; addr < icache_size; addr += icache_line_size) {
865 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
868 : "r" (addr | (2 << 3)),
/* Safely flush the I-cache: save the DCU control register, disable the
 * I-cache (set DCU_IC in a scratch write), run the diagnostic flush,
 * then restore the saved DCU value (re-enabling the I-cache if it was
 * on).
 */
873 static void cheetah_flush_icache(void)
875 unsigned long dcu_save;
877 /* Save current DCU, disable I-cache. */
878 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
879 "or %0, %2, %%g1\n\t"
880 "stxa %%g1, [%%g0] %1\n\t"
883 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
886 __cheetah_flush_icache();
888 /* Restore DCU register */
889 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
892 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
/* Invalidate the whole D-cache by zeroing every tag through the
 * ASI_DCACHE_TAG diagnostic ASI, one line at a time.
 */
895 static void cheetah_flush_dcache(void)
897 unsigned int dcache_size, dcache_line_size;
900 dcache_size = local_cpu_data().dcache_size;
901 dcache_line_size = local_cpu_data().dcache_line_size;
903 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
904 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
907 : "r" (addr), "i" (ASI_DCACHE_TAG));
911 /* In order to make the even parity correct we must do two things.
912 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
913 * Next, we clear out all 32-bytes of data for that line. Data of
914 * all-zero + tag parity value of zero == correct parity.
/* Cheetah+ D-cache parity scrub: for each line, write a consistent
 * microtag (derived from the address) and zero all data words so the
 * line's parity becomes valid again — see the recipe above.
 * NOTE(review): tag = addr >> 14 presumably encodes the utag from the
 * line address; confirm against the Cheetah+ manual.
 */
916 static void cheetah_plus_zap_dcache_parity(void)
918 unsigned int dcache_size, dcache_line_size;
921 dcache_size = local_cpu_data().dcache_size;
922 dcache_line_size = local_cpu_data().dcache_line_size;
924 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
925 unsigned long tag = (addr >> 14);
928 __asm__ __volatile__("membar #Sync\n\t"
929 "stxa %0, [%1] %2\n\t"
932 : "r" (tag), "r" (addr),
933 "i" (ASI_DCACHE_UTAG));
934 for (line = addr; line < addr + dcache_line_size; line += 8)
935 __asm__ __volatile__("membar #Sync\n\t"
936 "stxa %%g0, [%0] %1\n\t"
940 "i" (ASI_DCACHE_DATA));
944 /* Conversion tables used to frob Cheetah AFSR syndrome values into
945 * something palatable to the memory controller driver get_unumber
/* E-syndrome decode table: maps the 9-bit-derived syndrome index to a
 * data/check-bit number, or one of the C0-C8 (check bit), M/M2/M3/M4
 * (multi-bit), NONE sentinels defined above this excerpt (not visible
 * in this view of the file).
 */
969 static unsigned char cheetah_ecc_syntab[] = {
970 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
971 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
972 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
973 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
974 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
975 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
976 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
977 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
978 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
979 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
980 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
981 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
982 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
983 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
984 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
985 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
986 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
987 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
988 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
989 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
990 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
991 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
992 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
993 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
994 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
995 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
996 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
997 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
998 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
999 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1000 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1001 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
/* MTAG syndrome decode table (entries not visible in this view). */
1003 static unsigned char cheetah_mtag_syntab[] = {
1014 /* Return the highest priority error conditon mentioned. */
/* Scan the priority-ordered decode table and return the first (i.e.
 * highest-priority) error bit present in @afsr, or 0 if none match.
 */
1015 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1017 unsigned long tmp = 0;
1020 for (i = 0; cheetah_error_table[i].mask; i++) {
1021 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
/* Return the human-readable description for error bit @bit from the
 * active decode table (fallback return for no match is not visible in
 * this view of the file).
 */
1027 static const char *cheetah_get_string(unsigned long bit)
1031 for (i = 0; cheetah_error_table[i].mask; i++) {
1032 if ((bit & cheetah_error_table[i].mask) != 0UL)
1033 return cheetah_error_table[i].name;
/* Memory-controller driver hook used to resolve syndromes to unumbers. */
1038 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Log one Cheetah CPU error trap in full detail: AFSR/AFAR, trap PC
 * state, M/E syndromes, the highest-priority error bit, the D-/I-/E-cache
 * snapshots captured in *info, and any remaining multiple-error bits.
 * Severity prefix is KERN_WARNING when 'recoverable', else KERN_CRIT.
 * NOTE(review): this region is a truncated numbered dump -- several
 * original lines (e.g. local declarations such as the unum buffer,
 * closing braces, the loop's bit-clearing statement) are missing.
 */
1040 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1041 unsigned long afsr, unsigned long afar, int recoverable)
1043 unsigned long hipri;
1046 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1047 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1049 (afsr & CHAFSR_TL1) ? 1 : 0);
1050 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1051 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1052 regs->tpc, regs->tnpc, regs->tstate);
1053 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1054 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1055 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1056 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1057 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1058 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
/* Report only the single highest-priority error bit by name. */
1059 hipri = cheetah_get_hipri(afsr);
1060 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1061 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1062 hipri, cheetah_get_string(hipri));
1064 /* Try to get unumber if relevant. */
1065 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1066 CHAFSR_CPC | CHAFSR_CPU | \
1067 CHAFSR_UE | CHAFSR_CE | \
1068 CHAFSR_EDC | CHAFSR_EDU | \
1069 CHAFSR_UCC | CHAFSR_UCU | \
1070 CHAFSR_WDU | CHAFSR_WDC)
1071 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
/* E-syndrome path: translate the ECC syndrome through cheetah_ecc_syntab
 * and ask the memory controller driver for the DIMM "unum" label.
 */
1072 if (afsr & ESYND_ERRORS) {
1076 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1077 syndrome = cheetah_ecc_syntab[syndrome];
1078 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1080 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1081 (recoverable ? KERN_WARNING : KERN_CRIT),
1082 smp_processor_id(), unum);
/* M-syndrome path: same idea via the mtag syndrome table. */
1083 } else if (afsr & MSYND_ERRORS) {
1087 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1088 syndrome = cheetah_mtag_syntab[syndrome];
1089 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1091 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1092 (recoverable ? KERN_WARNING : KERN_CRIT),
1093 smp_processor_id(), unum);
1096 /* Now dump the cache snapshots. */
1097 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1098 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1099 (int) info->dcache_index,
1103 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1104 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1105 info->dcache_data[0],
1106 info->dcache_data[1],
1107 info->dcache_data[2],
1108 info->dcache_data[3]);
1109 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1110 "u[%016lx] l[%016lx]\n",
1111 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1112 (int) info->icache_index,
1117 info->icache_lower);
1118 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1119 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1120 info->icache_data[0],
1121 info->icache_data[1],
1122 info->icache_data[2],
1123 info->icache_data[3]);
1124 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1125 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1126 info->icache_data[4],
1127 info->icache_data[5],
1128 info->icache_data[6],
1129 info->icache_data[7]);
1130 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1131 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1132 (int) info->ecache_index, info->ecache_tag);
1133 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1134 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1135 info->ecache_data[0],
1136 info->ecache_data[1],
1137 info->ecache_data[2],
1138 info->ecache_data[3]);
/* Drain the remaining error bits in priority order, one line each.
 * NOTE(review): the statement that clears 'bit' from afsr inside the
 * loop was lost in the dump; without it this loop would not terminate.
 */
1140 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1141 while (afsr != 0UL) {
1142 unsigned long bit = cheetah_get_hipri(afsr);
1144 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1145 (recoverable ? KERN_WARNING : KERN_CRIT),
1146 bit, cheetah_get_string(bit));
1152 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read the AFSR (and, if any error bits are set, the AFAR), record
 * them into *logp when provided, then write the bits back to the AFSR
 * to clear them.  Returns non-zero when new error bits were found.
 * NOTE(review): truncated dump -- the asm constraint/output lines and
 * the logp update and return statements are missing here.
 */
1155 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1157 unsigned long afsr, afar;
1160 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1163 if ((afsr & cheetah_afsr_errors) != 0) {
1165 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
/* Writing the observed bits back to the AFSR acknowledges/clears them. */
1173 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1175 : : "r" (afsr), "i" (ASI_AFSR));
/* Fast-ECC (E-cache) error trap handler.  Flushes the E-cache, snapshots
 * the per-cpu error log, re-enables caches and error reporting, decides
 * recoverability, logs, and panics when the condition is fatal.
 * NOTE(review): truncated numbered dump -- local declarations
 * (e.g. 'recoverable'), asm clobber lines, and closing braces are missing.
 */
1180 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1182 struct cheetah_err_info local_snapshot, *p;
1186 cheetah_flush_ecache();
1188 p = cheetah_get_error_log(afsr);
/* No log slot: report via the PROM console; presumably followed by
 * prom_halt() in the missing lines -- TODO confirm.
 */
1190 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1192 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1193 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1197 /* Grab snapshot of logged error. */
1198 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1200 /* If the current trap snapshot does not match what the
1201 * trap handler passed along into our args, big trouble.
1202 * In such a case, mark the local copy as invalid.
1204 * Else, it matches and we mark the afsr in the non-local
1205 * copy as invalid so we may log new error traps there.
1207 if (p->afsr != afsr || p->afar != afar)
1208 local_snapshot.afsr = CHAFSR_INVALID;
1210 p->afsr = CHAFSR_INVALID;
1212 cheetah_flush_icache();
1213 cheetah_flush_dcache();
1215 /* Re-enable I-cache/D-cache */
1216 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1217 "or %%g1, %1, %%g1\n\t"
1218 "stxa %%g1, [%%g0] %0\n\t"
1221 : "i" (ASI_DCU_CONTROL_REG),
1222 "i" (DCU_DC | DCU_IC)
1225 /* Re-enable error reporting */
1226 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1227 "or %%g1, %1, %%g1\n\t"
1228 "stxa %%g1, [%%g0] %0\n\t"
1231 : "i" (ASI_ESTATE_ERROR_EN),
1232 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1235 /* Decide if we can continue after handling this trap and
1236 * logging the error.
/* System-fatal bits (parity/internal/address-parity) are never recoverable. */
1239 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1242 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1243 * error was logged while we had error reporting traps disabled.
1245 if (cheetah_recheck_errors(&local_snapshot)) {
1246 unsigned long new_afsr = local_snapshot.afsr;
1248 /* If we got a new asynchronous error, die... */
1249 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1250 CHAFSR_WDU | CHAFSR_CPU |
1251 CHAFSR_IVU | CHAFSR_UE |
1252 CHAFSR_BERR | CHAFSR_TO))
1257 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1260 panic("Irrecoverable Fast-ECC error trap.\n");
1262 /* Flush E-cache to kick the error trap handlers out. */
1263 cheetah_flush_ecache();
1266 /* Try to fix a correctable error by pushing the line out from
1267 * the E-cache. Recheck error reporting registers to see if the
1268 * problem is intermittent.
/* Attempt to scrub a correctable ECC error at 'physaddr' by displacing
 * the line out of the E-cache twice (with a cas in between to gain an
 * exclusive/modified copy), then re-checking the error registers.
 * Returns whether the error persisted -- NOTE(review): the actual
 * return statements were lost in this truncated dump; based on the
 * comments, the "persists" and "intermittent" paths return different
 * values -- confirm against the full source.
 */
1270 static int cheetah_fix_ce(unsigned long physaddr)
1272 unsigned long orig_estate;
1273 unsigned long alias1, alias2;
1276 /* Make sure correctable error traps are disabled. */
1277 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1278 "andn %0, %1, %%g1\n\t"
1279 "stxa %%g1, [%%g0] %2\n\t"
1281 : "=&r" (orig_estate)
1282 : "i" (ESTATE_ERROR_CEEN),
1283 "i" (ASI_ESTATE_ERROR_EN)
1286 /* We calculate alias addresses that will force the
1287 * cache line in question out of the E-cache. Then
1288 * we bring it back in with an atomic instruction so
1289 * that we get it in some modified/exclusive state,
1290 * then we displace it again to try and get proper ECC
1291 * pushed back into the system.
/* Align to the 8-byte ECC granule before computing flush aliases. */
1293 physaddr &= ~(8UL - 1UL);
1294 alias1 = (ecache_flush_physbase +
1295 (physaddr & ((ecache_flush_size >> 1) - 1)));
1296 alias2 = alias1 + (ecache_flush_size >> 1);
1297 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1298 "ldxa [%1] %3, %%g0\n\t"
1299 "casxa [%2] %3, %%g0, %%g0\n\t"
1300 "membar #StoreLoad | #StoreStore\n\t"
1301 "ldxa [%0] %3, %%g0\n\t"
1302 "ldxa [%1] %3, %%g0\n\t"
1305 : "r" (alias1), "r" (alias2),
1306 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1308 /* Did that trigger another error? */
1309 if (cheetah_recheck_errors(NULL)) {
1310 /* Try one more time. */
1311 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1313 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1314 if (cheetah_recheck_errors(NULL))
1319 /* No new error, intermittent problem. */
1323 /* Restore error enables. */
1324 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1326 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1331 /* Return non-zero if PADDR is a valid physical memory address. */
1332 static int cheetah_check_main_memory(unsigned long paddr)
1334 unsigned long vaddr = PAGE_OFFSET + paddr;
1336 return kern_addr_valid(vaddr);
/* Correctable-ECC error trap handler.  Snapshots the per-cpu error log,
 * scrubs main-memory CE errors via cheetah_fix_ce(), flushes the caches
 * implicated by EDC/CPC bits, re-enables CE reporting, then logs and
 * panics only if an unrecoverable condition is present.
 * NOTE(review): truncated dump -- flush_all/flush_line assignments,
 * some asm operand lines, and closing braces are missing.
 */
1339 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1341 struct cheetah_err_info local_snapshot, *p;
1342 int recoverable, is_memory;
1344 p = cheetah_get_error_log(afsr);
1346 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1348 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1349 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1353 /* Grab snapshot of logged error. */
1354 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1356 /* If the current trap snapshot does not match what the
1357 * trap handler passed along into our args, big trouble.
1358 * In such a case, mark the local copy as invalid.
1360 * Else, it matches and we mark the afsr in the non-local
1361 * copy as invalid so we may log new error traps there.
1363 if (p->afsr != afsr || p->afar != afar)
1364 local_snapshot.afsr = CHAFSR_INVALID;
1366 p->afsr = CHAFSR_INVALID;
1368 is_memory = cheetah_check_main_memory(afar);
/* A CE in real memory can be scrubbed in place. */
1370 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1371 /* XXX Might want to log the results of this operation
1372 * XXX somewhere... -DaveM
1374 cheetah_fix_ce(afar);
1378 int flush_all, flush_line;
1380 flush_all = flush_line = 0;
/* If EDC/CPC is the only pending error, flushing just the afflicted
 * line suffices; otherwise flush the whole E-cache.
 */
1381 if ((afsr & CHAFSR_EDC) != 0UL) {
1382 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1386 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1387 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1393 /* Trap handler only disabled I-cache, flush it. */
1394 cheetah_flush_icache();
1396 /* Re-enable I-cache */
1397 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1398 "or %%g1, %1, %%g1\n\t"
1399 "stxa %%g1, [%%g0] %0\n\t"
1402 : "i" (ASI_DCU_CONTROL_REG),
1407 cheetah_flush_ecache();
1408 else if (flush_line)
1409 cheetah_flush_ecache_line(afar);
1412 /* Re-enable error reporting */
1413 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1414 "or %%g1, %1, %%g1\n\t"
1415 "stxa %%g1, [%%g0] %0\n\t"
1418 : "i" (ASI_ESTATE_ERROR_EN),
1419 "i" (ESTATE_ERROR_CEEN)
1422 /* Decide if we can continue after handling this trap and
1423 * logging the error.
1426 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1429 /* Re-check AFSR/AFAR */
1430 (void) cheetah_recheck_errors(&local_snapshot);
1433 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1436 panic("Irrecoverable Correctable-ECC error trap.\n");
/* Deferred asynchronous error trap handler (EDU/BERR/UE etc.).
 * Special-cases the PCI config-space "poke" probing sequence, otherwise
 * snapshots the error log, flushes caches, re-enables error reporting,
 * and -- if recoverable -- pins the bad page and uses the kernel
 * exception tables to skip the faulting access.  Panics otherwise.
 * NOTE(review): truncated numbered dump; returns, brace lines and some
 * statements (e.g. flush flag assignments) are missing throughout.
 */
1439 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1441 struct cheetah_err_info local_snapshot, *p;
1442 int recoverable, is_memory;
1445 /* Check for the special PCI poke sequence. */
1446 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1447 cheetah_flush_icache();
1448 cheetah_flush_dcache();
1450 /* Re-enable I-cache/D-cache */
1451 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1452 "or %%g1, %1, %%g1\n\t"
1453 "stxa %%g1, [%%g0] %0\n\t"
1456 : "i" (ASI_DCU_CONTROL_REG),
1457 "i" (DCU_DC | DCU_IC)
1460 /* Re-enable error reporting */
1461 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1462 "or %%g1, %1, %%g1\n\t"
1463 "stxa %%g1, [%%g0] %0\n\t"
1466 : "i" (ASI_ESTATE_ERROR_EN),
1467 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
/* Clear the latched error and tell the poke code it faulted, then
 * step the trapping instruction (tnpc = tpc + 4) so the probe resumes.
 */
1470 (void) cheetah_recheck_errors(NULL);
1472 pci_poke_faulted = 1;
1474 regs->tnpc = regs->tpc + 4;
1479 p = cheetah_get_error_log(afsr);
1481 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1483 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1484 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1488 /* Grab snapshot of logged error. */
1489 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1491 /* If the current trap snapshot does not match what the
1492 * trap handler passed along into our args, big trouble.
1493 * In such a case, mark the local copy as invalid.
1495 * Else, it matches and we mark the afsr in the non-local
1496 * copy as invalid so we may log new error traps there.
1498 if (p->afsr != afsr || p->afar != afar)
1499 local_snapshot.afsr = CHAFSR_INVALID;
1501 p->afsr = CHAFSR_INVALID;
1503 is_memory = cheetah_check_main_memory(afar);
1506 int flush_all, flush_line;
1508 flush_all = flush_line = 0;
/* Sole EDU/BERR pending -> a line flush suffices; otherwise flush all. */
1509 if ((afsr & CHAFSR_EDU) != 0UL) {
1510 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1514 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1515 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1521 cheetah_flush_icache();
1522 cheetah_flush_dcache();
1524 /* Re-enable I/D caches */
1525 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1526 "or %%g1, %1, %%g1\n\t"
1527 "stxa %%g1, [%%g0] %0\n\t"
1530 : "i" (ASI_DCU_CONTROL_REG),
1531 "i" (DCU_IC | DCU_DC)
1535 cheetah_flush_ecache();
1536 else if (flush_line)
1537 cheetah_flush_ecache_line(afar);
1540 /* Re-enable error reporting */
1541 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1542 "or %%g1, %1, %%g1\n\t"
1543 "stxa %%g1, [%%g0] %0\n\t"
1546 : "i" (ASI_ESTATE_ERROR_EN),
1547 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1550 /* Decide if we can continue after handling this trap and
1551 * logging the error.
1554 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1557 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1558 * error was logged while we had error reporting traps disabled.
1560 if (cheetah_recheck_errors(&local_snapshot)) {
1561 unsigned long new_afsr = local_snapshot.afsr;
1563 /* If we got a new asynchronous error, die... */
1564 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1565 CHAFSR_WDU | CHAFSR_CPU |
1566 CHAFSR_IVU | CHAFSR_UE |
1567 CHAFSR_BERR | CHAFSR_TO))
1572 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1574 /* "Recoverable" here means we try to yank the page from ever
1575 * being newly used again. This depends upon a few things:
1576 * 1) Must be main memory, and AFAR must be valid.
1577 * 2) If we trapped from user, OK.
1578 * 3) Else, if we trapped from kernel we must find exception
1579 * table entry (ie. we have to have been accessing user
1582 * If AFAR is not in main memory, or we trapped from kernel
1583 * and cannot find an exception table entry, it is unacceptable
1584 * to try and continue.
1586 if (recoverable && is_memory) {
1587 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1588 /* OK, usermode access. */
1591 const struct exception_table_entry *entry;
1593 entry = search_exception_tables(regs->tpc);
1595 /* OK, kernel access to userspace. */
1599 /* BAD, privileged state is corrupted. */
/* Leak a reference on the bad page so it is never handed out again. */
1604 if (pfn_valid(afar >> PAGE_SHIFT))
1605 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1609 /* Only perform fixup if we still have a
1610 * recoverable condition.
1613 regs->tpc = entry->fixup;
1614 regs->tnpc = regs->tpc + 4;
1623 panic("Irrecoverable deferred error trap.\n");
1626 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1628 * Bit0: 0=dcache,1=icache
1629 * Bit1: 0=recoverable,1=unrecoverable
1631 * The hardware has disabled both the I-cache and D-cache in
1632 * the %dcr register.
/* Recover from (or die on) a Cheetah+ I/D-cache parity error.  Flushes
 * and re-enables the caches the hardware disabled, then either panics
 * (type bit1 set = unrecoverable) or logs a warning.
 * NOTE(review): truncated dump -- the type tests, printk TPC arguments,
 * and braces are missing from this view.
 */
1634 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1637 __cheetah_flush_icache();
1639 cheetah_plus_zap_dcache_parity();
1640 cheetah_flush_dcache();
1642 /* Re-enable I-cache/D-cache */
1643 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1644 "or %%g1, %1, %%g1\n\t"
1645 "stxa %%g1, [%%g0] %0\n\t"
1648 : "i" (ASI_DCU_CONTROL_REG),
1649 "i" (DCU_DC | DCU_IC)
/* Unrecoverable path: scream and panic. */
1653 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1655 (type & 0x1) ? 'I' : 'D',
1657 panic("Irrecoverable Cheetah+ parity error.");
/* Recoverable path: warn only. */
1660 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1662 (type & 0x1) ? 'I' : 'D',
1666 void do_fpe_common(struct pt_regs *regs)
1668 if (regs->tstate & TSTATE_PRIV) {
1669 regs->tpc = regs->tnpc;
1672 unsigned long fsr = current_thread_info()->xfsr[0];
1675 if (test_thread_flag(TIF_32BIT)) {
1676 regs->tpc &= 0xffffffff;
1677 regs->tnpc &= 0xffffffff;
1679 info.si_signo = SIGFPE;
1681 info.si_addr = (void __user *)regs->tpc;
1683 info.si_code = __SI_FAULT;
1684 if ((fsr & 0x1c000) == (1 << 14)) {
1686 info.si_code = FPE_FLTINV;
1687 else if (fsr & 0x08)
1688 info.si_code = FPE_FLTOVF;
1689 else if (fsr & 0x04)
1690 info.si_code = FPE_FLTUND;
1691 else if (fsr & 0x02)
1692 info.si_code = FPE_FLTDIV;
1693 else if (fsr & 0x01)
1694 info.si_code = FPE_FLTRES;
1696 force_sig_info(SIGFPE, &info, current);
1700 void do_fpieee(struct pt_regs *regs)
1702 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1703 0, 0x24, SIGFPE) == NOTIFY_STOP)
1706 do_fpe_common(regs);
1709 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1711 void do_fpother(struct pt_regs *regs)
1713 struct fpustate *f = FPUSTATE;
1716 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1717 0, 0x25, SIGFPE) == NOTIFY_STOP)
1720 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1721 case (2 << 14): /* unfinished_FPop */
1722 case (3 << 14): /* unimplemented_FPop */
1723 ret = do_mathemu(regs, f);
1728 do_fpe_common(regs);
1731 void do_tof(struct pt_regs *regs)
1735 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1736 0, 0x26, SIGEMT) == NOTIFY_STOP)
1739 if (regs->tstate & TSTATE_PRIV)
1740 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1741 if (test_thread_flag(TIF_32BIT)) {
1742 regs->tpc &= 0xffffffff;
1743 regs->tnpc &= 0xffffffff;
1745 info.si_signo = SIGEMT;
1747 info.si_code = EMT_TAGOVF;
1748 info.si_addr = (void __user *)regs->tpc;
1750 force_sig_info(SIGEMT, &info, current);
1753 void do_div0(struct pt_regs *regs)
1757 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1758 0, 0x28, SIGFPE) == NOTIFY_STOP)
1761 if (regs->tstate & TSTATE_PRIV)
1762 die_if_kernel("TL0: Kernel divide by zero.", regs);
1763 if (test_thread_flag(TIF_32BIT)) {
1764 regs->tpc &= 0xffffffff;
1765 regs->tnpc &= 0xffffffff;
1767 info.si_signo = SIGFPE;
1769 info.si_code = FPE_INTDIV;
1770 info.si_addr = (void __user *)regs->tpc;
1772 force_sig_info(SIGFPE, &info, current);
/* Dump the nine kernel instructions around 'pc' (3 before, 6 after),
 * marking the trapping one with angle brackets.  Bails out silently on
 * a misaligned pc.
 * NOTE(review): restored loop-counter declaration, early return, and
 * trailing newline lost in the truncated numbered dump.
 */
void instruction_dump (unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}
1788 static void user_instruction_dump (unsigned int __user *pc)
1791 unsigned int buf[9];
1793 if ((((unsigned long) pc) & 3))
1796 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1799 printk("Instruction DUMP:");
1800 for (i = 0; i < 9; i++)
1801 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Walk and print the call trace of 'tsk' starting from kernel stack
 * pointer _ksp, following saved frame pointers (%i6, biased) through up
 * to 16 register windows, rejecting frame pointers that leave the
 * task's thread area.  With KALLSYMS, each PC is symbolized.
 * NOTE(review): truncated numbered dump -- the loop's 'do {', the pc
 * assignment, flushw_all() call, counter declaration and #ifdef bodies
 * are missing from this view.
 */
1805 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1807 unsigned long pc, fp, thread_base, ksp;
1808 struct thread_info *tp = tsk->thread_info;
1809 struct reg_window *rw;
1812 ksp = (unsigned long) _ksp;
/* Tracing ourselves: spill live register windows to the stack first. */
1814 if (tp == current_thread_info())
1817 fp = ksp + STACK_BIAS;
1818 thread_base = (unsigned long) tp;
1820 printk("Call Trace:");
1821 #ifdef CONFIG_KALLSYMS
1825 /* Bogus frame pointer? */
1826 if (fp < (thread_base + sizeof(struct thread_info)) ||
1827 fp >= (thread_base + THREAD_SIZE))
1829 rw = (struct reg_window *)fp;
1831 printk(" [%016lx] ", pc)
1832 print_symbol("%s\n", pc);
1833 fp = rw->ins[6] + STACK_BIAS;
1834 } while (++count < 16);
1835 #ifndef CONFIG_KALLSYMS
1840 void dump_stack(void)
1844 __asm__ __volatile__("mov %%fp, %0"
1846 show_stack(current, ksp);
1849 EXPORT_SYMBOL(dump_stack);
/* Return non-zero when 'rw' points at a plausibly valid, 8-byte aligned
 * register window inside 'task's kernel thread area.  Addresses below
 * PAGE_OFFSET are only tolerated for init_task (boot-time stack).
 * NOTE(review): truncated numbered dump -- the return statements and
 * closing braces are missing from this view; the visible conjunction at
 * line 1865 is cut short (presumably an alignment test follows --
 * confirm against the full source).
 */
1851 static inline int is_kernel_stack(struct task_struct *task,
1852 struct reg_window *rw)
1854 unsigned long rw_addr = (unsigned long) rw;
1855 unsigned long thread_base, thread_end;
1857 if (rw_addr < PAGE_OFFSET) {
1858 if (task != &init_task)
1862 thread_base = (unsigned long) task->thread_info;
1863 thread_end = thread_base + sizeof(union thread_union);
1864 if (rw_addr >= thread_base &&
1865 rw_addr < thread_end &&
1872 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1874 unsigned long fp = rw->ins[6];
1879 return (struct reg_window *) (fp + STACK_BIAS);
/* Oops path: print the banner and register state, walk the kernel stack
 * printing callers (kernel-mode traps), dump the faulting instructions,
 * then terminate the task (do_exit with SIGKILL/SIGSEGV depending on
 * privilege -- the exit calls are in lines missing from this view).
 * NOTE(review): truncated numbered dump -- the ASCII-art banner lines,
 * __show_regs/smp_report_regs calls, the while-loop header, and the
 * do_exit tail are missing here and must not be reconstructed blindly.
 */
1882 void die_if_kernel(char *str, struct pt_regs *regs)
1884 static int die_counter;
1885 extern void __show_regs(struct pt_regs * regs);
1886 extern void smp_report_regs(void);
1889 /* Amuse the user. */
1892 " \"@'/ .. \\`@\"\n"
1896 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
1897 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
/* Spill register windows so the stack walk below sees current frames. */
1898 __asm__ __volatile__("flushw");
1900 if (regs->tstate & TSTATE_PRIV) {
1901 struct reg_window *rw = (struct reg_window *)
1902 (regs->u_regs[UREG_FP] + STACK_BIAS);
1904 /* Stop the back trace when we hit userland or we
1905 * find some badly aligned kernel stack.
1909 is_kernel_stack(current, rw)) {
1910 printk("Caller[%016lx]", rw->ins[7]);
1911 print_symbol(": %s", rw->ins[7]);
1914 rw = kernel_stack_up(rw);
1916 instruction_dump ((unsigned int *) regs->tpc);
1918 if (test_thread_flag(TIF_32BIT)) {
1919 regs->tpc &= 0xffffffff;
1920 regs->tnpc &= 0xffffffff;
1922 user_instruction_dump ((unsigned int __user *) regs->tpc);
1928 if (regs->tstate & TSTATE_PRIV)
1933 extern int handle_popc(u32 insn, struct pt_regs *regs);
1934 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1936 void do_illegal_instruction(struct pt_regs *regs)
1938 unsigned long pc = regs->tpc;
1939 unsigned long tstate = regs->tstate;
1943 if (notify_die(DIE_TRAP, "illegal instruction", regs,
1944 0, 0x10, SIGILL) == NOTIFY_STOP)
1947 if (tstate & TSTATE_PRIV)
1948 die_if_kernel("Kernel illegal instruction", regs);
1949 if (test_thread_flag(TIF_32BIT))
1951 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1952 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1953 if (handle_popc(insn, regs))
1955 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1956 if (handle_ldf_stq(insn, regs))
1960 info.si_signo = SIGILL;
1962 info.si_code = ILL_ILLOPC;
1963 info.si_addr = (void __user *)pc;
1965 force_sig_info(SIGILL, &info, current);
1968 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1972 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
1973 0, 0x34, SIGSEGV) == NOTIFY_STOP)
1976 if (regs->tstate & TSTATE_PRIV) {
1977 extern void kernel_unaligned_trap(struct pt_regs *regs,
1980 unsigned long sfsr);
1982 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
1986 info.si_signo = SIGBUS;
1988 info.si_code = BUS_ADRALN;
1989 info.si_addr = (void __user *)sfar;
1991 force_sig_info(SIGBUS, &info, current);
1994 void do_privop(struct pt_regs *regs)
1998 if (notify_die(DIE_TRAP, "privileged operation", regs,
1999 0, 0x11, SIGILL) == NOTIFY_STOP)
2002 if (test_thread_flag(TIF_32BIT)) {
2003 regs->tpc &= 0xffffffff;
2004 regs->tnpc &= 0xffffffff;
2006 info.si_signo = SIGILL;
2008 info.si_code = ILL_PRVOPC;
2009 info.si_addr = (void __user *)regs->tpc;
2011 force_sig_info(SIGILL, &info, current);
2014 void do_privact(struct pt_regs *regs)
2019 /* Trap level 1 stuff or other traps we should never see... */
/* Unexpected TL0 Cache Error Exception: fatal if from kernel mode. */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
2025 void do_cee_tl1(struct pt_regs *regs)
2027 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2028 die_if_kernel("TL1: Cache Error Exception", regs);
2031 void do_dae_tl1(struct pt_regs *regs)
2033 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2034 die_if_kernel("TL1: Data Access Exception", regs);
2037 void do_iae_tl1(struct pt_regs *regs)
2039 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2040 die_if_kernel("TL1: Instruction Access Exception", regs);
2043 void do_div0_tl1(struct pt_regs *regs)
2045 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2046 die_if_kernel("TL1: DIV0 Exception", regs);
2049 void do_fpdis_tl1(struct pt_regs *regs)
2051 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2052 die_if_kernel("TL1: FPU Disabled", regs);
2055 void do_fpieee_tl1(struct pt_regs *regs)
2057 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2058 die_if_kernel("TL1: FPU IEEE Exception", regs);
2061 void do_fpother_tl1(struct pt_regs *regs)
2063 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2064 die_if_kernel("TL1: FPU Other Exception", regs);
2067 void do_ill_tl1(struct pt_regs *regs)
2069 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2070 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2073 void do_irq_tl1(struct pt_regs *regs)
2075 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2076 die_if_kernel("TL1: IRQ Exception", regs);
2079 void do_lddfmna_tl1(struct pt_regs *regs)
2081 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2082 die_if_kernel("TL1: LDDF Exception", regs);
2085 void do_stdfmna_tl1(struct pt_regs *regs)
2087 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2088 die_if_kernel("TL1: STDF Exception", regs);
/* TL0 physical watchpoint hit: fatal if taken from kernel mode. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2096 void do_paw_tl1(struct pt_regs *regs)
2098 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2099 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
/* TL0 virtual watchpoint hit: fatal if taken from kernel mode. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2107 void do_vaw_tl1(struct pt_regs *regs)
2109 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2110 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2113 void do_tof_tl1(struct pt_regs *regs)
2115 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2116 die_if_kernel("TL1: Tag Overflow Exception", regs);
2119 void do_getpsr(struct pt_regs *regs)
2121 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2122 regs->tpc = regs->tnpc;
2124 if (test_thread_flag(TIF_32BIT)) {
2125 regs->tpc &= 0xffffffff;
2126 regs->tnpc &= 0xffffffff;
2130 extern void thread_info_offsets_are_bolixed_dave(void);
2132 /* Only invoked on boot processor. */
2133 void __init trap_init(void)
2135 /* Compile time sanity check. */
2136 if (TI_TASK != offsetof(struct thread_info, task) ||
2137 TI_FLAGS != offsetof(struct thread_info, flags) ||
2138 TI_CPU != offsetof(struct thread_info, cpu) ||
2139 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2140 TI_KSP != offsetof(struct thread_info, ksp) ||
2141 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2142 TI_KREGS != offsetof(struct thread_info, kregs) ||
2143 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2144 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2145 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2146 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2147 TI_GSR != offsetof(struct thread_info, gsr) ||
2148 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2149 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2150 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2151 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2152 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2153 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2154 TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
2155 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2156 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2157 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2158 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2159 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2160 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2161 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2162 (TI_FPREGS & (64 - 1)))
2163 thread_info_offsets_are_bolixed_dave();
2165 /* Attach to the address space of init_task. On SMP we
2166 * do this in smp.c:smp_callin for other cpus.
2168 atomic_inc(&init_mm.mm_count);
2169 current->active_mm = &init_mm;