/*
 * arch/blackfin/kernel/kgdb.c - Blackfin kgdb pieces
 *
 * Copyright 2005-2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
9 #include <linux/string.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/smp.h>
13 #include <linux/spinlock.h>
14 #include <linux/delay.h>
15 #include <linux/ptrace.h> /* for linux pt_regs struct */
16 #include <linux/kgdb.h>
17 #include <linux/console.h>
18 #include <linux/init.h>
19 #include <linux/errno.h>
20 #include <linux/irq.h>
21 #include <linux/uaccess.h>
22 #include <asm/system.h>
23 #include <asm/traps.h>
24 #include <asm/blackfin.h>
27 /* Put the error code here just in case the user cares. */
29 /* Likewise, the vector number here (since GDB only gets the signal
30 number through the usual means, and that's not very specific). */
31 int gdb_bfin_vector = -1;
33 #if KGDB_MAX_NO_CPUS != 8
34 #error change the definition of slavecpulocks
37 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
39 gdb_regs[BFIN_R0] = regs->r0;
40 gdb_regs[BFIN_R1] = regs->r1;
41 gdb_regs[BFIN_R2] = regs->r2;
42 gdb_regs[BFIN_R3] = regs->r3;
43 gdb_regs[BFIN_R4] = regs->r4;
44 gdb_regs[BFIN_R5] = regs->r5;
45 gdb_regs[BFIN_R6] = regs->r6;
46 gdb_regs[BFIN_R7] = regs->r7;
47 gdb_regs[BFIN_P0] = regs->p0;
48 gdb_regs[BFIN_P1] = regs->p1;
49 gdb_regs[BFIN_P2] = regs->p2;
50 gdb_regs[BFIN_P3] = regs->p3;
51 gdb_regs[BFIN_P4] = regs->p4;
52 gdb_regs[BFIN_P5] = regs->p5;
53 gdb_regs[BFIN_SP] = regs->reserved;
54 gdb_regs[BFIN_FP] = regs->fp;
55 gdb_regs[BFIN_I0] = regs->i0;
56 gdb_regs[BFIN_I1] = regs->i1;
57 gdb_regs[BFIN_I2] = regs->i2;
58 gdb_regs[BFIN_I3] = regs->i3;
59 gdb_regs[BFIN_M0] = regs->m0;
60 gdb_regs[BFIN_M1] = regs->m1;
61 gdb_regs[BFIN_M2] = regs->m2;
62 gdb_regs[BFIN_M3] = regs->m3;
63 gdb_regs[BFIN_B0] = regs->b0;
64 gdb_regs[BFIN_B1] = regs->b1;
65 gdb_regs[BFIN_B2] = regs->b2;
66 gdb_regs[BFIN_B3] = regs->b3;
67 gdb_regs[BFIN_L0] = regs->l0;
68 gdb_regs[BFIN_L1] = regs->l1;
69 gdb_regs[BFIN_L2] = regs->l2;
70 gdb_regs[BFIN_L3] = regs->l3;
71 gdb_regs[BFIN_A0_DOT_X] = regs->a0x;
72 gdb_regs[BFIN_A0_DOT_W] = regs->a0w;
73 gdb_regs[BFIN_A1_DOT_X] = regs->a1x;
74 gdb_regs[BFIN_A1_DOT_W] = regs->a1w;
75 gdb_regs[BFIN_ASTAT] = regs->astat;
76 gdb_regs[BFIN_RETS] = regs->rets;
77 gdb_regs[BFIN_LC0] = regs->lc0;
78 gdb_regs[BFIN_LT0] = regs->lt0;
79 gdb_regs[BFIN_LB0] = regs->lb0;
80 gdb_regs[BFIN_LC1] = regs->lc1;
81 gdb_regs[BFIN_LT1] = regs->lt1;
82 gdb_regs[BFIN_LB1] = regs->lb1;
83 gdb_regs[BFIN_CYCLES] = 0;
84 gdb_regs[BFIN_CYCLES2] = 0;
85 gdb_regs[BFIN_USP] = regs->usp;
86 gdb_regs[BFIN_SEQSTAT] = regs->seqstat;
87 gdb_regs[BFIN_SYSCFG] = regs->syscfg;
88 gdb_regs[BFIN_RETI] = regs->pc;
89 gdb_regs[BFIN_RETX] = regs->retx;
90 gdb_regs[BFIN_RETN] = regs->retn;
91 gdb_regs[BFIN_RETE] = regs->rete;
92 gdb_regs[BFIN_PC] = regs->pc;
93 gdb_regs[BFIN_CC] = 0;
94 gdb_regs[BFIN_EXTRA1] = 0;
95 gdb_regs[BFIN_EXTRA2] = 0;
96 gdb_regs[BFIN_EXTRA3] = 0;
97 gdb_regs[BFIN_IPEND] = regs->ipend;
101 * Extracts ebp, esp and eip values understandable by gdb from the values
102 * saved by switch_to.
103 * thread.esp points to ebp. flags and ebp are pushed in switch_to hence esp
104 * prior to entering switch_to is 8 greater then the value that is saved.
105 * If switch_to changes, change following code appropriately.
107 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
109 gdb_regs[BFIN_SP] = p->thread.ksp;
110 gdb_regs[BFIN_PC] = p->thread.pc;
111 gdb_regs[BFIN_SEQSTAT] = p->thread.seqstat;
114 void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
116 regs->r0 = gdb_regs[BFIN_R0];
117 regs->r1 = gdb_regs[BFIN_R1];
118 regs->r2 = gdb_regs[BFIN_R2];
119 regs->r3 = gdb_regs[BFIN_R3];
120 regs->r4 = gdb_regs[BFIN_R4];
121 regs->r5 = gdb_regs[BFIN_R5];
122 regs->r6 = gdb_regs[BFIN_R6];
123 regs->r7 = gdb_regs[BFIN_R7];
124 regs->p0 = gdb_regs[BFIN_P0];
125 regs->p1 = gdb_regs[BFIN_P1];
126 regs->p2 = gdb_regs[BFIN_P2];
127 regs->p3 = gdb_regs[BFIN_P3];
128 regs->p4 = gdb_regs[BFIN_P4];
129 regs->p5 = gdb_regs[BFIN_P5];
130 regs->fp = gdb_regs[BFIN_FP];
131 regs->i0 = gdb_regs[BFIN_I0];
132 regs->i1 = gdb_regs[BFIN_I1];
133 regs->i2 = gdb_regs[BFIN_I2];
134 regs->i3 = gdb_regs[BFIN_I3];
135 regs->m0 = gdb_regs[BFIN_M0];
136 regs->m1 = gdb_regs[BFIN_M1];
137 regs->m2 = gdb_regs[BFIN_M2];
138 regs->m3 = gdb_regs[BFIN_M3];
139 regs->b0 = gdb_regs[BFIN_B0];
140 regs->b1 = gdb_regs[BFIN_B1];
141 regs->b2 = gdb_regs[BFIN_B2];
142 regs->b3 = gdb_regs[BFIN_B3];
143 regs->l0 = gdb_regs[BFIN_L0];
144 regs->l1 = gdb_regs[BFIN_L1];
145 regs->l2 = gdb_regs[BFIN_L2];
146 regs->l3 = gdb_regs[BFIN_L3];
147 regs->a0x = gdb_regs[BFIN_A0_DOT_X];
148 regs->a0w = gdb_regs[BFIN_A0_DOT_W];
149 regs->a1x = gdb_regs[BFIN_A1_DOT_X];
150 regs->a1w = gdb_regs[BFIN_A1_DOT_W];
151 regs->rets = gdb_regs[BFIN_RETS];
152 regs->lc0 = gdb_regs[BFIN_LC0];
153 regs->lt0 = gdb_regs[BFIN_LT0];
154 regs->lb0 = gdb_regs[BFIN_LB0];
155 regs->lc1 = gdb_regs[BFIN_LC1];
156 regs->lt1 = gdb_regs[BFIN_LT1];
157 regs->lb1 = gdb_regs[BFIN_LB1];
158 regs->usp = gdb_regs[BFIN_USP];
159 regs->syscfg = gdb_regs[BFIN_SYSCFG];
160 regs->retx = gdb_regs[BFIN_PC];
161 regs->retn = gdb_regs[BFIN_RETN];
162 regs->rete = gdb_regs[BFIN_RETE];
163 regs->pc = gdb_regs[BFIN_PC];
165 #if 0 /* can't change these */
166 regs->astat = gdb_regs[BFIN_ASTAT];
167 regs->seqstat = gdb_regs[BFIN_SEQSTAT];
168 regs->ipend = gdb_regs[BFIN_IPEND];
/*
 * Book-keeping for one hardware watchpoint slot.  The type, skip and
 * addr members are referenced by bfin_set_hw_break(),
 * bfin_remove_hw_break() and bfin_correct_hw_break() below and are
 * restored here (their field lines were lost from this copy).
 */
struct hw_breakpoint {
	unsigned int occupied:1;	/* slot claimed by a breakpoint */
	unsigned int skip:1;		/* skip flag (cleared on allocation) */
	unsigned int enabled:1;		/* programmed into the hardware */
	unsigned int type:1;		/* TYPE_INST_/TYPE_DATA_WATCHPOINT */
	unsigned int dataacc:2;		/* data access direction selector */
	unsigned short count;		/* hardware trigger count */
	unsigned int addr;		/* watched address */
} breakinfo[HW_WATCHPOINT_NUM];
182 int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
189 case BP_HARDWARE_BREAKPOINT:
190 bfin_type = TYPE_INST_WATCHPOINT;
192 case BP_WRITE_WATCHPOINT:
194 bfin_type = TYPE_DATA_WATCHPOINT;
196 case BP_READ_WATCHPOINT:
198 bfin_type = TYPE_DATA_WATCHPOINT;
200 case BP_ACCESS_WATCHPOINT:
202 bfin_type = TYPE_DATA_WATCHPOINT;
208 /* Becasue hardware data watchpoint impelemented in current
209 * Blackfin can not trigger an exception event as the hardware
210 * instrction watchpoint does, we ignaore all data watch point here.
211 * They can be turned on easily after future blackfin design
212 * supports this feature.
214 for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++)
215 if (bfin_type == breakinfo[breakno].type
216 && !breakinfo[breakno].occupied) {
217 breakinfo[breakno].occupied = 1;
218 breakinfo[breakno].skip = 0;
219 breakinfo[breakno].enabled = 1;
220 breakinfo[breakno].addr = addr;
221 breakinfo[breakno].dataacc = dataacc;
222 breakinfo[breakno].count = 0;
229 int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
235 case BP_HARDWARE_BREAKPOINT:
236 bfin_type = TYPE_INST_WATCHPOINT;
238 case BP_WRITE_WATCHPOINT:
239 case BP_READ_WATCHPOINT:
240 case BP_ACCESS_WATCHPOINT:
241 bfin_type = TYPE_DATA_WATCHPOINT;
246 for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++)
247 if (bfin_type == breakinfo[breakno].type
248 && breakinfo[breakno].occupied
249 && breakinfo[breakno].addr == addr) {
250 breakinfo[breakno].occupied = 0;
251 breakinfo[breakno].enabled = 0;
257 void bfin_remove_all_hw_break(void)
261 memset(breakinfo, 0, sizeof(struct hw_breakpoint)*HW_WATCHPOINT_NUM);
263 for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++)
264 breakinfo[breakno].type = TYPE_INST_WATCHPOINT;
265 for (; breakno < HW_WATCHPOINT_NUM; breakno++)
266 breakinfo[breakno].type = TYPE_DATA_WATCHPOINT;
/*
 * Program the Blackfin watchpoint unit from breakinfo[].
 *
 * Accumulates enable/count-enable bits into wpiactl (instruction
 * watchpoints, slots 0-5) and wpdactl (data watchpoints, slots 0-1),
 * writes each enabled slot's address (WPIAx/WPDAx) and trigger count
 * (WPIACNTx/WPDACNTx) MMRs, then commits both control registers.
 *
 * NOTE(review): the per-slot dispatch lines (presumably
 * "if (breakno == N)") and the WPxxCNT continuation lines are missing
 * from this copy -- confirm against the complete file.
 */
void bfin_correct_hw_break(void)
	unsigned int wpiactl = 0;
	unsigned int wpdactl = 0;

	/* walk every slot; only enabled ones contribute control bits */
	for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++)
		if (breakinfo[breakno].enabled) {
			/* instruction watchpoint slot 0 */
			wpiactl |= WPIAEN0|WPICNTEN0;
			bfin_write_WPIA0(breakinfo[breakno].addr);
			bfin_write_WPIACNT0(breakinfo[breakno].count
			/* instruction watchpoint slot 1 */
			wpiactl |= WPIAEN1|WPICNTEN1;
			bfin_write_WPIA1(breakinfo[breakno].addr);
			bfin_write_WPIACNT1(breakinfo[breakno].count
			/* instruction watchpoint slot 2 */
			wpiactl |= WPIAEN2|WPICNTEN2;
			bfin_write_WPIA2(breakinfo[breakno].addr);
			bfin_write_WPIACNT2(breakinfo[breakno].count
			/* instruction watchpoint slot 3 */
			wpiactl |= WPIAEN3|WPICNTEN3;
			bfin_write_WPIA3(breakinfo[breakno].addr);
			bfin_write_WPIACNT3(breakinfo[breakno].count
			/* instruction watchpoint slot 4 */
			wpiactl |= WPIAEN4|WPICNTEN4;
			bfin_write_WPIA4(breakinfo[breakno].addr);
			bfin_write_WPIACNT4(breakinfo[breakno].count
			/* instruction watchpoint slot 5 */
			wpiactl |= WPIAEN5|WPICNTEN5;
			bfin_write_WPIA5(breakinfo[breakno].addr);
			bfin_write_WPIACNT5(breakinfo[breakno].count
			/* data watchpoint slot 0; dataacc selects direction */
			wpdactl |= WPDAEN0|WPDCNTEN0|WPDSRC0;
			wpdactl |= breakinfo[breakno].dataacc
			bfin_write_WPDA0(breakinfo[breakno].addr);
			bfin_write_WPDACNT0(breakinfo[breakno].count
			/* data watchpoint slot 1 */
			wpdactl |= WPDAEN1|WPDCNTEN1|WPDSRC1;
			wpdactl |= breakinfo[breakno].dataacc
			bfin_write_WPDA1(breakinfo[breakno].addr);
			bfin_write_WPDACNT1(breakinfo[breakno].count
	/* Should enable WPPWR bit first before set any other
	 * WPIACTL and WPDACTL bits */
	bfin_write_WPIACTL(WPPWR);
	bfin_write_WPIACTL(wpiactl|WPPWR);
	bfin_write_WPDACTL(wpdactl);
/*
 * Turn the watchpoint unit off entirely by zeroing both control MMRs.
 * Called so hardware debug events cannot fire while kgdb itself runs.
 */
void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	/* Disable hardware debugging while we are in kgdb */
	bfin_write_WPIACTL(0);
	bfin_write_WPDACTL(0);
}
/* IPI handler: check this (non-master) CPU into the kgdb core. */
void kgdb_passive_cpu_callback(void *info)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
361 void kgdb_roundup_cpus(unsigned long flags)
363 smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
366 void kgdb_roundup_cpu(int cpu, unsigned long flags)
368 smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
372 void kgdb_post_primary_code(struct pt_regs *regs, int eVector, int err_code)
374 /* Master processor is completely in the debugger */
375 gdb_bfin_vector = eVector;
376 gdb_bfin_errcode = err_code;
/*
 * Arch hook for gdb packets the generic kgdb core passes down.
 *
 * Visible here: continue/step handling keyed off remcom_in_buffer[0].
 * An optional hex address parameter replaces the resume pc, the SYSCFG
 * trace bit is cleared, and for 's' the single-step bookkeeping is
 * derived from IPEND so the stepper knows the event priority the core
 * stopped at (user space = 0, IVG15 = 1, ... IVTMR = 10).  Hardware
 * watchpoints are re-armed before resuming.  Falls through to
 * "return -1" for everything else (see the trailing comment).
 *
 * NOTE(review): the case labels, local declarations and several
 * closing braces are missing from this copy.
 */
int kgdb_arch_handle_exception(int vector, int signo,
	int err_code, char *remcom_in_buffer,
	char *remcom_out_buffer,
	struct pt_regs *regs)
	switch (remcom_in_buffer[0]) {
		/* cannot continue/step on behalf of a different thread */
		if (kgdb_contthread && kgdb_contthread != current) {
			strcpy(remcom_out_buffer, "E00");
		kgdb_contthread = NULL;
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &addr)) {
		/* clear the trace bit */
		regs->syscfg &= 0xfffffffe;
		/* set the trace bit if we're stepping */
		if (remcom_in_buffer[0] == 's') {
			/* scan IPEND (bits 6..15) for the active event level */
			kgdb_single_step = regs->ipend;
			kgdb_single_step >>= 6;
			for (i = 10; i > 0; i--, kgdb_single_step >>= 1)
				if (kgdb_single_step & 1)
			/* i indicate event priority of current stopped instruction
			 * user space instruction is 0, IVG15 is 1, IVTMR is 10.
			 * kgdb_single_step > 0 means in single step mode
			 */
			kgdb_single_step = i + 1;
		/* re-arm hardware watchpoints before resuming */
		bfin_correct_hw_break();
	return -1;	/* this means that we do not want to exit from the handler */
/*
 * Arch description handed to the generic kgdb core: the software
 * breakpoint instruction bytes plus the hardware-breakpoint callbacks
 * defined above.
 *
 * NOTE(review): the two consecutive .flags initializers are presumably
 * the SMP and non-SMP arms of a preprocessor conditional whose
 * #ifdef/#else/#endif lines are missing from this copy -- confirm
 * against the complete file.
 */
struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0xa1},
	.flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
	.flags = KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint = bfin_set_hw_break,
	.remove_hw_breakpoint = bfin_remove_hw_break,
	.remove_all_hw_break = bfin_remove_all_hw_break,
	.correct_hw_break = bfin_correct_hw_break,
/*
 * hex - convert one ASCII hex digit to its value
 * @ch: character to convert
 *
 * Accepts 'a'-'f', '0'-'9' and 'A'-'F'.  Returns the digit value
 * (0-15) or -1 for a non-hex character.  The digit-range and failure
 * returns were missing from this copy and are restored here.
 */
static int hex(char ch)
{
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	if ((ch >= 'A') && (ch <= 'F'))
		return ch - 'A' + 10;
	return -1;
}
/*
 * Check that [addr, addr+size) lies in memory kgdb may touch on the
 * current core: external SDRAM (above the trap page), MMR space, the
 * async banks, this core's L1 memories, or L2.
 *
 * NOTE(review): the success/error return lines, the "if (cpu == 0) {"
 * opener implied by the "} else if (cpu == 1)" branch, and the
 * CONFIG_SMP / L2 preprocessor framing are missing from this copy.
 */
static int validate_memory_access_address(unsigned long addr, int size)
	int cpu = raw_smp_processor_id();

	/* external memory, skipping the NULL-trap page */
	if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
	if (addr >= SYSMMR_BASE)
	if (addr >= ASYNC_BANK0_BASE
	    && addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
	/* core A L1 memories */
	if (addr >= L1_SCRATCH_START
	    && (addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH))
#if L1_CODE_LENGTH != 0
	if (addr >= L1_CODE_START
	    && (addr + size <= L1_CODE_START + L1_CODE_LENGTH))
#if L1_DATA_A_LENGTH != 0
	if (addr >= L1_DATA_A_START
	    && (addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH))
#if L1_DATA_B_LENGTH != 0
	if (addr >= L1_DATA_B_START
	    && (addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH))
	} else if (cpu == 1) {
		/* core B L1 memories (SMP) */
		if (addr >= COREB_L1_SCRATCH_START
		    && (addr + size <= COREB_L1_SCRATCH_START
		    + L1_SCRATCH_LENGTH))
# if L1_CODE_LENGTH != 0
		if (addr >= COREB_L1_CODE_START
		    && (addr + size <= COREB_L1_CODE_START + L1_CODE_LENGTH))
# if L1_DATA_A_LENGTH != 0
		if (addr >= COREB_L1_DATA_A_START
		    && (addr + size <= COREB_L1_DATA_A_START + L1_DATA_A_LENGTH))
# if L1_DATA_B_LENGTH != 0
		if (addr >= COREB_L1_DATA_B_START
		    && (addr + size <= COREB_L1_DATA_B_START + L1_DATA_B_LENGTH))
	/* L2 SRAM */
	    && addr + size <= L2_START + L2_LENGTH)
/*
 * Convert 'count' bytes at 'mem' into hex, placing the result in 'buf'.
 * MMR addresses get a single correctly sized and aligned load, L1
 * instruction SRAM is read via dma_memcpy(), anything else through
 * probe_kernel_read().  Returns an int status (the old "returns a
 * pointer" wording was stale); the error-return lines are missing from
 * this copy.
 */
int kgdb_mem2hex(char *mem, char *buf, int count)
	unsigned short mmr16;
	int cpu = raw_smp_processor_id();

	/* refuse ranges outside the debuggable memory map */
	if (validate_memory_access_address((unsigned long)mem, count))
	/*
	 * We use the upper half of buf as an intermediate buffer for the
	 * raw memory copy. Hex conversion will work against this one.
	 */
	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
		/* 16-bit MMR: requires 2-byte alignment */
		if ((unsigned int)mem % 2 == 0) {
			mmr16 = *(unsigned short *)mem;
			pch = (unsigned char *)&mmr16;
		/* 32-bit MMR: requires 4-byte alignment */
		if ((unsigned int)mem % 4 == 0) {
			mmr32 = *(unsigned long *)mem;
			pch = (unsigned char *)&mmr32;
	} else if ((cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
		(unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH)
	    || (cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
		(unsigned int)(mem + count) <=
		COREB_L1_CODE_START + L1_CODE_LENGTH)
		/* access L1 instruction SRAM*/
		if (dma_memcpy(tmp, mem, count) == NULL)
		err = probe_kernel_read(tmp, mem, count);
		/* hex-encode the raw bytes gathered above */
		buf = pack_hex_byte(buf, *tmp);
/*
 * Copy the escaped-binary array in 'buf' into 'mem': bytes prefixed
 * with 0x7d are unescaped by XORing the following byte with 0x20
 * ($, # and 0x7d arrive escaped).  MMR addresses get a single sized
 * and aligned store, L1 instruction SRAM goes through dma_memcpy(),
 * everything else through probe_kernel_write().  Returns an int status
 * (the old "returns a pointer" wording was stale).
 *
 * NOTE(review): by the time of the L1 range check, 'count' has been
 * consumed (decremented) by the unescape loop; that check presumably
 * intends the decoded length 'size' -- confirm against the full file.
 */
int kgdb_ebin2mem(char *buf, char *mem, int count)
	unsigned short *mmr16;
	unsigned long *mmr32;
	int cpu = raw_smp_processor_id();

	/* unescape in place: 0x7d X -> X ^ 0x20 */
	tmp_old = tmp_new = buf;
	while (count-- > 0) {
		if (*tmp_old == 0x7d)
			*tmp_new = *(++tmp_old) ^ 0x20;
	if (validate_memory_access_address((unsigned long)mem, size))
	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
		/* 16-bit MMR: requires 2-byte alignment */
		if ((unsigned int)mem % 2 == 0) {
			mmr16 = (unsigned short *)buf;
			*(unsigned short *)mem = *mmr16;
		/* 32-bit MMR: requires 4-byte alignment */
		if ((unsigned int)mem % 4 == 0) {
			mmr32 = (unsigned long *)buf;
			*(unsigned long *)mem = *mmr32;
	} else if ((cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
		(unsigned int)(mem + count) < L1_CODE_START + L1_CODE_LENGTH)
	    || (cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
		(unsigned int)(mem + count) <=
		COREB_L1_CODE_START + L1_CODE_LENGTH)
		/* access L1 instruction SRAM */
		if (dma_memcpy(mem, buf, size) == NULL)
		err = probe_kernel_write(mem, buf, size);
/*
 * Convert the hex array in 'buf' into binary and store it at 'mem'.
 * The upper half of 'buf' is used as scratch for the decoded bytes
 * (decoding walks backwards so input and output can share the buffer).
 * MMR addresses get a single sized and aligned store, L1 instruction
 * SRAM goes through dma_memcpy(), everything else through
 * probe_kernel_write().  Returns an int status.
 */
int kgdb_hex2mem(char *buf, char *mem, int count)
	unsigned short *mmr16;
	unsigned long *mmr32;
	int cpu = raw_smp_processor_id();

	/* refuse ranges outside the debuggable memory map */
	if (validate_memory_access_address((unsigned long)mem, count))
	/*
	 * We use the upper half of buf as an intermediate buffer for the
	 * raw memory that is converted from hex.
	 */
	tmp_raw = buf + count * 2;

	/* decode two hex chars per byte, back to front */
	tmp_hex = tmp_raw - 1;
	while (tmp_hex >= buf) {
		*tmp_raw = hex(*tmp_hex--);
		*tmp_raw |= hex(*tmp_hex--) << 4;
	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
		/* 16-bit MMR: requires 2-byte alignment */
		if ((unsigned int)mem % 2 == 0) {
			mmr16 = (unsigned short *)tmp_raw;
			*(unsigned short *)mem = *mmr16;
		/* 32-bit MMR: requires 4-byte alignment */
		if ((unsigned int)mem % 4 == 0) {
			mmr32 = (unsigned long *)tmp_raw;
			*(unsigned long *)mem = *mmr32;
	} else if ((cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
		(unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH)
	    || (cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
		(unsigned int)(mem + count) <=
		COREB_L1_CODE_START + L1_CODE_LENGTH)
		/* access L1 instruction SRAM */
		if (dma_memcpy(mem, tmp_raw, count) == NULL)
	return probe_kernel_write(mem, tmp_raw, count);
726 int kgdb_validate_break_address(unsigned long addr)
728 int cpu = raw_smp_processor_id();
730 if (addr >= 0x1000 && (addr + BREAK_INSTR_SIZE) <= physical_mem_end)
732 if (addr >= ASYNC_BANK0_BASE
733 && addr + BREAK_INSTR_SIZE <= ASYNC_BANK3_BASE + ASYNC_BANK3_BASE)
735 #if L1_CODE_LENGTH != 0
736 if (cpu == 0 && addr >= L1_CODE_START
737 && addr + BREAK_INSTR_SIZE <= L1_CODE_START + L1_CODE_LENGTH)
740 else if (cpu == 1 && addr >= COREB_L1_CODE_START
741 && addr + BREAK_INSTR_SIZE <= COREB_L1_CODE_START + L1_CODE_LENGTH)
747 && addr + BREAK_INSTR_SIZE <= L2_START + L2_LENGTH)
/*
 * Plant the kgdb breakpoint instruction at 'addr', first saving the
 * original bytes into 'saved_instr'.  Addresses in the current core's
 * L1 instruction SRAM must be copied with dma_memcpy(); everything
 * else uses probe_kernel_read()/probe_kernel_write().
 *
 * NOTE(review): the error-return lines and several closing braces are
 * missing from this copy.
 */
int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
	int cpu = raw_smp_processor_id();

	if ((cpu == 0 && (unsigned int)addr >= L1_CODE_START
	    && (unsigned int)(addr + BREAK_INSTR_SIZE)
	    < L1_CODE_START + L1_CODE_LENGTH)
	    || (cpu == 1 && (unsigned int)addr >= COREB_L1_CODE_START
	    && (unsigned int)(addr + BREAK_INSTR_SIZE)
	    < COREB_L1_CODE_START + L1_CODE_LENGTH)
		/* access L1 instruction SRAM */
		if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
		if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
			BREAK_INSTR_SIZE) == NULL)
	/* ordinary memory: save then overwrite via the safe accessors */
	err = probe_kernel_read(saved_instr, (char *)addr,
	return probe_kernel_write((char *)addr,
		arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
/*
 * Restore the original instruction bytes in 'bundle' at 'addr'.
 * L1 instruction SRAM is written via dma_memcpy(); other addresses via
 * probe_kernel_write().
 *
 * NOTE(review): unlike kgdb_arch_set_breakpoint() above, only core A's
 * L1 code range is special-cased -- there is no COREB_L1_CODE_START
 * branch here.  Verify CoreB breakpoint removal on SMP builds.
 */
int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
	if ((unsigned int)addr >= L1_CODE_START &&
		(unsigned int)(addr + BREAK_INSTR_SIZE) <
			L1_CODE_START + L1_CODE_LENGTH) {
		/* access L1 instruction SRAM */
		if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
	return probe_kernel_write((char *)addr,
		(char *)bundle, BREAK_INSTR_SIZE);
804 int kgdb_arch_init(void)
806 kgdb_single_step = 0;
808 bfin_remove_all_hw_break();
812 void kgdb_arch_exit(void)