/*
 * arch/blackfin/kernel/kgdb.c - Blackfin kgdb pieces
 *
 * Copyright 2005-2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
9 #include <linux/string.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/smp.h>
13 #include <linux/spinlock.h>
14 #include <linux/delay.h>
15 #include <linux/ptrace.h> /* for linux pt_regs struct */
16 #include <linux/kgdb.h>
17 #include <linux/console.h>
18 #include <linux/init.h>
19 #include <linux/errno.h>
20 #include <linux/irq.h>
21 #include <linux/uaccess.h>
22 #include <asm/system.h>
23 #include <asm/traps.h>
24 #include <asm/blackfin.h>
27 /* Put the error code here just in case the user cares. */
29 /* Likewise, the vector number here (since GDB only gets the signal
30 number through the usual means, and that's not very specific). */
31 int gdb_bfin_vector = -1;
33 #if KGDB_MAX_NO_CPUS != 8
34 #error change the definition of slavecpulocks
37 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
39 gdb_regs[BFIN_R0] = regs->r0;
40 gdb_regs[BFIN_R1] = regs->r1;
41 gdb_regs[BFIN_R2] = regs->r2;
42 gdb_regs[BFIN_R3] = regs->r3;
43 gdb_regs[BFIN_R4] = regs->r4;
44 gdb_regs[BFIN_R5] = regs->r5;
45 gdb_regs[BFIN_R6] = regs->r6;
46 gdb_regs[BFIN_R7] = regs->r7;
47 gdb_regs[BFIN_P0] = regs->p0;
48 gdb_regs[BFIN_P1] = regs->p1;
49 gdb_regs[BFIN_P2] = regs->p2;
50 gdb_regs[BFIN_P3] = regs->p3;
51 gdb_regs[BFIN_P4] = regs->p4;
52 gdb_regs[BFIN_P5] = regs->p5;
53 gdb_regs[BFIN_SP] = regs->reserved;
54 gdb_regs[BFIN_FP] = regs->fp;
55 gdb_regs[BFIN_I0] = regs->i0;
56 gdb_regs[BFIN_I1] = regs->i1;
57 gdb_regs[BFIN_I2] = regs->i2;
58 gdb_regs[BFIN_I3] = regs->i3;
59 gdb_regs[BFIN_M0] = regs->m0;
60 gdb_regs[BFIN_M1] = regs->m1;
61 gdb_regs[BFIN_M2] = regs->m2;
62 gdb_regs[BFIN_M3] = regs->m3;
63 gdb_regs[BFIN_B0] = regs->b0;
64 gdb_regs[BFIN_B1] = regs->b1;
65 gdb_regs[BFIN_B2] = regs->b2;
66 gdb_regs[BFIN_B3] = regs->b3;
67 gdb_regs[BFIN_L0] = regs->l0;
68 gdb_regs[BFIN_L1] = regs->l1;
69 gdb_regs[BFIN_L2] = regs->l2;
70 gdb_regs[BFIN_L3] = regs->l3;
71 gdb_regs[BFIN_A0_DOT_X] = regs->a0x;
72 gdb_regs[BFIN_A0_DOT_W] = regs->a0w;
73 gdb_regs[BFIN_A1_DOT_X] = regs->a1x;
74 gdb_regs[BFIN_A1_DOT_W] = regs->a1w;
75 gdb_regs[BFIN_ASTAT] = regs->astat;
76 gdb_regs[BFIN_RETS] = regs->rets;
77 gdb_regs[BFIN_LC0] = regs->lc0;
78 gdb_regs[BFIN_LT0] = regs->lt0;
79 gdb_regs[BFIN_LB0] = regs->lb0;
80 gdb_regs[BFIN_LC1] = regs->lc1;
81 gdb_regs[BFIN_LT1] = regs->lt1;
82 gdb_regs[BFIN_LB1] = regs->lb1;
83 gdb_regs[BFIN_CYCLES] = 0;
84 gdb_regs[BFIN_CYCLES2] = 0;
85 gdb_regs[BFIN_USP] = regs->usp;
86 gdb_regs[BFIN_SEQSTAT] = regs->seqstat;
87 gdb_regs[BFIN_SYSCFG] = regs->syscfg;
88 gdb_regs[BFIN_RETI] = regs->pc;
89 gdb_regs[BFIN_RETX] = regs->retx;
90 gdb_regs[BFIN_RETN] = regs->retn;
91 gdb_regs[BFIN_RETE] = regs->rete;
92 gdb_regs[BFIN_PC] = regs->pc;
93 gdb_regs[BFIN_CC] = 0;
94 gdb_regs[BFIN_EXTRA1] = 0;
95 gdb_regs[BFIN_EXTRA2] = 0;
96 gdb_regs[BFIN_EXTRA3] = 0;
97 gdb_regs[BFIN_IPEND] = regs->ipend;
101 * Extracts ebp, esp and eip values understandable by gdb from the values
102 * saved by switch_to.
103 * thread.esp points to ebp. flags and ebp are pushed in switch_to hence esp
104 * prior to entering switch_to is 8 greater then the value that is saved.
105 * If switch_to changes, change following code appropriately.
107 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
109 gdb_regs[BFIN_SP] = p->thread.ksp;
110 gdb_regs[BFIN_PC] = p->thread.pc;
111 gdb_regs[BFIN_SEQSTAT] = p->thread.seqstat;
114 void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
116 regs->r0 = gdb_regs[BFIN_R0];
117 regs->r1 = gdb_regs[BFIN_R1];
118 regs->r2 = gdb_regs[BFIN_R2];
119 regs->r3 = gdb_regs[BFIN_R3];
120 regs->r4 = gdb_regs[BFIN_R4];
121 regs->r5 = gdb_regs[BFIN_R5];
122 regs->r6 = gdb_regs[BFIN_R6];
123 regs->r7 = gdb_regs[BFIN_R7];
124 regs->p0 = gdb_regs[BFIN_P0];
125 regs->p1 = gdb_regs[BFIN_P1];
126 regs->p2 = gdb_regs[BFIN_P2];
127 regs->p3 = gdb_regs[BFIN_P3];
128 regs->p4 = gdb_regs[BFIN_P4];
129 regs->p5 = gdb_regs[BFIN_P5];
130 regs->fp = gdb_regs[BFIN_FP];
131 regs->i0 = gdb_regs[BFIN_I0];
132 regs->i1 = gdb_regs[BFIN_I1];
133 regs->i2 = gdb_regs[BFIN_I2];
134 regs->i3 = gdb_regs[BFIN_I3];
135 regs->m0 = gdb_regs[BFIN_M0];
136 regs->m1 = gdb_regs[BFIN_M1];
137 regs->m2 = gdb_regs[BFIN_M2];
138 regs->m3 = gdb_regs[BFIN_M3];
139 regs->b0 = gdb_regs[BFIN_B0];
140 regs->b1 = gdb_regs[BFIN_B1];
141 regs->b2 = gdb_regs[BFIN_B2];
142 regs->b3 = gdb_regs[BFIN_B3];
143 regs->l0 = gdb_regs[BFIN_L0];
144 regs->l1 = gdb_regs[BFIN_L1];
145 regs->l2 = gdb_regs[BFIN_L2];
146 regs->l3 = gdb_regs[BFIN_L3];
147 regs->a0x = gdb_regs[BFIN_A0_DOT_X];
148 regs->a0w = gdb_regs[BFIN_A0_DOT_W];
149 regs->a1x = gdb_regs[BFIN_A1_DOT_X];
150 regs->a1w = gdb_regs[BFIN_A1_DOT_W];
151 regs->rets = gdb_regs[BFIN_RETS];
152 regs->lc0 = gdb_regs[BFIN_LC0];
153 regs->lt0 = gdb_regs[BFIN_LT0];
154 regs->lb0 = gdb_regs[BFIN_LB0];
155 regs->lc1 = gdb_regs[BFIN_LC1];
156 regs->lt1 = gdb_regs[BFIN_LT1];
157 regs->lb1 = gdb_regs[BFIN_LB1];
158 regs->usp = gdb_regs[BFIN_USP];
159 regs->syscfg = gdb_regs[BFIN_SYSCFG];
160 regs->retx = gdb_regs[BFIN_PC];
161 regs->retn = gdb_regs[BFIN_RETN];
162 regs->rete = gdb_regs[BFIN_RETE];
163 regs->pc = gdb_regs[BFIN_PC];
165 #if 0 /* can't change these */
166 regs->astat = gdb_regs[BFIN_ASTAT];
167 regs->seqstat = gdb_regs[BFIN_SEQSTAT];
168 regs->ipend = gdb_regs[BFIN_IPEND];
/*
 * Per-slot bookkeeping for the on-chip watchpoint units.  The .skip,
 * .type and .addr members are referenced by the set/remove/correct
 * routines below, so they must exist here.
 */
struct hw_breakpoint {
	unsigned int occupied:1;	/* slot is in use */
	unsigned int skip:1;		/* added to the hw count register -- confirm semantics */
	unsigned int enabled:1;		/* slot should be programmed into hardware */
	unsigned int type:1;		/* TYPE_INST_WATCHPOINT or TYPE_DATA_WATCHPOINT */
	unsigned int dataacc:2;		/* data access kind for data watchpoints */
	unsigned short count;
	unsigned int addr;		/* watched address */
} breakinfo[HW_WATCHPOINT_NUM];
182 int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
189 case BP_HARDWARE_BREAKPOINT:
190 bfin_type = TYPE_INST_WATCHPOINT;
192 case BP_WRITE_WATCHPOINT:
194 bfin_type = TYPE_DATA_WATCHPOINT;
196 case BP_READ_WATCHPOINT:
198 bfin_type = TYPE_DATA_WATCHPOINT;
200 case BP_ACCESS_WATCHPOINT:
202 bfin_type = TYPE_DATA_WATCHPOINT;
208 /* Becasue hardware data watchpoint impelemented in current
209 * Blackfin can not trigger an exception event as the hardware
210 * instrction watchpoint does, we ignaore all data watch point here.
211 * They can be turned on easily after future blackfin design
212 * supports this feature.
214 for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++)
215 if (bfin_type == breakinfo[breakno].type
216 && !breakinfo[breakno].occupied) {
217 breakinfo[breakno].occupied = 1;
218 breakinfo[breakno].skip = 0;
219 breakinfo[breakno].enabled = 1;
220 breakinfo[breakno].addr = addr;
221 breakinfo[breakno].dataacc = dataacc;
222 breakinfo[breakno].count = 0;
229 int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
235 case BP_HARDWARE_BREAKPOINT:
236 bfin_type = TYPE_INST_WATCHPOINT;
238 case BP_WRITE_WATCHPOINT:
239 case BP_READ_WATCHPOINT:
240 case BP_ACCESS_WATCHPOINT:
241 bfin_type = TYPE_DATA_WATCHPOINT;
246 for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++)
247 if (bfin_type == breakinfo[breakno].type
248 && breakinfo[breakno].occupied
249 && breakinfo[breakno].addr == addr) {
250 breakinfo[breakno].occupied = 0;
251 breakinfo[breakno].enabled = 0;
257 void bfin_remove_all_hw_break(void)
261 memset(breakinfo, 0, sizeof(struct hw_breakpoint)*HW_WATCHPOINT_NUM);
263 for (breakno = 0; breakno < HW_INST_WATCHPOINT_NUM; breakno++)
264 breakinfo[breakno].type = TYPE_INST_WATCHPOINT;
265 for (; breakno < HW_WATCHPOINT_NUM; breakno++)
266 breakinfo[breakno].type = TYPE_DATA_WATCHPOINT;
/*
 * bfin_correct_hw_break - program the on-chip watchpoint units from the
 * breakinfo[] table: accumulate WPIACTL/WPDACTL control bits and write
 * each enabled slot's address and count registers, enabling WPPWR first.
 *
 * NOTE(review): lines were lost in extraction here (the embedded
 * original line numbers below are discontinuous): the opening brace,
 * the switch-on-slot-number framing that picks WPIA0..5 / WPDA0..1, the
 * "+ skip" continuation of each count write, and the closing braces are
 * all missing.  Do not modify the logic from this partial view.
 */
269 void bfin_correct_hw_break(void)
272 unsigned int wpiactl = 0;
273 unsigned int wpdactl = 0;
/* Walk every slot; only enabled ones are programmed into hardware. */
276 for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++)
277 if (breakinfo[breakno].enabled) {
/* Instruction watchpoint slots 0..5 -> WPIA0..WPIA5. */
282 wpiactl |= WPIAEN0|WPICNTEN0;
283 bfin_write_WPIA0(breakinfo[breakno].addr);
284 bfin_write_WPIACNT0(breakinfo[breakno].count
288 wpiactl |= WPIAEN1|WPICNTEN1;
289 bfin_write_WPIA1(breakinfo[breakno].addr);
290 bfin_write_WPIACNT1(breakinfo[breakno].count
294 wpiactl |= WPIAEN2|WPICNTEN2;
295 bfin_write_WPIA2(breakinfo[breakno].addr);
296 bfin_write_WPIACNT2(breakinfo[breakno].count
300 wpiactl |= WPIAEN3|WPICNTEN3;
301 bfin_write_WPIA3(breakinfo[breakno].addr);
302 bfin_write_WPIACNT3(breakinfo[breakno].count
306 wpiactl |= WPIAEN4|WPICNTEN4;
307 bfin_write_WPIA4(breakinfo[breakno].addr);
308 bfin_write_WPIACNT4(breakinfo[breakno].count
312 wpiactl |= WPIAEN5|WPICNTEN5;
313 bfin_write_WPIA5(breakinfo[breakno].addr);
314 bfin_write_WPIACNT5(breakinfo[breakno].count
/* Data watchpoint slots -> WPDA0/WPDA1, with the access kind
 * (read/write/any) shifted into the WPDACC field. */
318 wpdactl |= WPDAEN0|WPDCNTEN0|WPDSRC0;
319 wpdactl |= breakinfo[breakno].dataacc
321 bfin_write_WPDA0(breakinfo[breakno].addr);
322 bfin_write_WPDACNT0(breakinfo[breakno].count
326 wpdactl |= WPDAEN1|WPDCNTEN1|WPDSRC1;
327 wpdactl |= breakinfo[breakno].dataacc
329 bfin_write_WPDA1(breakinfo[breakno].addr);
330 bfin_write_WPDACNT1(breakinfo[breakno].count
/* Should enable WPPWR bit first before set any other
 * WPIACTL and WPDACTL bits */
339 bfin_write_WPIACTL(WPPWR);
341 bfin_write_WPIACTL(wpiactl|WPPWR);
342 bfin_write_WPDACTL(wpdactl);
/*
 * kgdb_disable_hw_debug - turn off all hardware watchpoints by clearing
 * the instruction and data watchpoint control registers.
 */
void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	/* Disable hardware debugging while we are in kgdb */
	bfin_write_WPIACTL(0);
	bfin_write_WPDACTL(0);
}
/* IPI callback: pull a passive (non-master) CPU into the debugger. */
void kgdb_passive_cpu_callback(void *info)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
361 void kgdb_roundup_cpus(unsigned long flags)
363 smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
366 void kgdb_roundup_cpu(int cpu, unsigned long flags)
368 smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
372 void kgdb_post_primary_code(struct pt_regs *regs, int eVector, int err_code)
374 /* Master processor is completely in the debugger */
375 gdb_bfin_vector = eVector;
376 gdb_bfin_errcode = err_code;
/*
 * kgdb_arch_handle_exception - arch hook for gdb remote-protocol
 * packets kgdb's core does not handle itself.  Visible here: the
 * continue/step path (optional new PC, clearing/setting the SYSCFG
 * trace bit, deriving kgdb_single_step from the IPEND priority) and the
 * -1 "not handled" fallthrough.
 *
 * NOTE(review): lines were lost in extraction (embedded original line
 * numbers are discontinuous): the 'c'/'s' case labels, local variable
 * declarations, several break/return statements and closing braces are
 * missing.  Do not modify the logic from this partial view.
 */
379 int kgdb_arch_handle_exception(int vector, int signo,
380 int err_code, char *remcom_in_buffer,
381 char *remcom_out_buffer,
382 struct pt_regs *regs)
391 switch (remcom_in_buffer[0]) {
/* Refuse to continue/step on behalf of a different thread. */
394 if (kgdb_contthread && kgdb_contthread != current) {
395 strcpy(remcom_out_buffer, "E00");
399 kgdb_contthread = NULL;
/* try to read optional parameter, pc unchanged if no parm */
402 ptr = &remcom_in_buffer[1];
403 if (kgdb_hex2long(&ptr, &addr)) {
/* clear the trace bit */
409 regs->syscfg &= 0xfffffffe;
/* set the trace bit if we're stepping */
412 if (remcom_in_buffer[0] == 's') {
/* Scan IPEND (bits above 6) to find the priority level of the
 * interrupted context. */
414 kgdb_single_step = regs->ipend;
415 kgdb_single_step >>= 6;
416 for (i = 10; i > 0; i--, kgdb_single_step >>= 1)
417 if (kgdb_single_step & 1)
/* i indicate event priority of current stopped instruction
 * user space instruction is 0, IVG15 is 1, IVTMR is 10.
 * kgdb_single_step > 0 means in single step mode
 */
423 kgdb_single_step = i + 1;
/* Re-arm hardware breakpoints before resuming. */
426 bfin_correct_hw_break();
430 return -1; /* this means that we do not want to exit from the handler */
433 struct kgdb_arch arch_kgdb_ops = {
434 .gdb_bpt_instr = {0xa1},
436 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
438 .flags = KGDB_HW_BREAKPOINT,
440 .set_hw_breakpoint = bfin_set_hw_break,
441 .remove_hw_breakpoint = bfin_remove_hw_break,
442 .remove_all_hw_break = bfin_remove_all_hw_break,
443 .correct_hw_break = bfin_correct_hw_break,
/*
 * hex - convert one hex digit character to its value, or -1 if @ch is
 * not a hex digit.  (The digit branch and the failure return were
 * missing from the damaged original.)
 */
static int hex(char ch)
{
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	if ((ch >= 'A') && (ch <= 'F'))
		return ch - 'A' + 10;
	return -1;
}
/*
 * validate_memory_access_address - check that [addr, addr+size) lies
 * entirely inside a region the debugger may touch: external SDRAM
 * (above the first 4K), MMR space, the async banks, the current core's
 * L1 scratch/code/data regions (Core B variants on cpu 1), or L2.
 * Returns 0 when the range is acceptable.
 *
 * NOTE(review): lines were lost in extraction (embedded original line
 * numbers are discontinuous): the "return 0" bodies, the "if (cpu == 0) {"
 * opener matched by the visible "} else if (cpu == 1) {", several
 * #endif lines, the L2 #if/start-of-condition, and the failure return
 * are missing.  Do not modify the logic from this partial view.
 */
457 static int validate_memory_access_address(unsigned long addr, int size)
459 int cpu = raw_smp_processor_id();
/* External memory: skip the first 4K (NULL-pointer trap page). */
463 if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
465 if (addr >= SYSMMR_BASE)
467 if (addr >= ASYNC_BANK0_BASE
468 && addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
/* Core A (cpu 0) L1 regions. */
471 if (addr >= L1_SCRATCH_START
472 && (addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH))
474 #if L1_CODE_LENGTH != 0
475 if (addr >= L1_CODE_START
476 && (addr + size <= L1_CODE_START + L1_CODE_LENGTH))
479 #if L1_DATA_A_LENGTH != 0
480 if (addr >= L1_DATA_A_START
481 && (addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH))
484 #if L1_DATA_B_LENGTH != 0
485 if (addr >= L1_DATA_B_START
486 && (addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH))
/* Core B (cpu 1) mirrors of the same L1 regions. */
490 } else if (cpu == 1) {
491 if (addr >= COREB_L1_SCRATCH_START
492 && (addr + size <= COREB_L1_SCRATCH_START
493 + L1_SCRATCH_LENGTH))
495 # if L1_CODE_LENGTH != 0
496 if (addr >= COREB_L1_CODE_START
497 && (addr + size <= COREB_L1_CODE_START + L1_CODE_LENGTH))
500 # if L1_DATA_A_LENGTH != 0
501 if (addr >= COREB_L1_DATA_A_START
502 && (addr + size <= COREB_L1_DATA_A_START + L1_DATA_A_LENGTH))
505 # if L1_DATA_B_LENGTH != 0
506 if (addr >= COREB_L1_DATA_B_START
507 && (addr + size <= COREB_L1_DATA_B_START + L1_DATA_B_LENGTH))
/* Shared L2 SRAM. */
515 && addr + size <= L2_START + L2_LENGTH)
/*
 * kgdb_mem2hex - convert @count bytes at @mem into hex in @buf.
 * MMR addresses are read with a single aligned 16/32-bit access (byte
 * accesses to MMR space are not allowed); L1 instruction SRAM is read
 * via dma_memcpy(); everything else goes through probe_kernel_read()
 * so a bad address faults safely instead of crashing.
 *
 * NOTE(review): lines were lost in extraction (embedded original line
 * numbers are discontinuous): the validation failure return, the
 * switch-on-count framing for the MMR path, the byte-copy loops, the
 * #ifdef CONFIG_SMP guard around the Core-B test, and the final hex
 * conversion loop's framing are missing.  Do not modify the logic from
 * this partial view.
 */
523 * Convert the memory pointed to by mem into hex, placing result in buf.
524 * Return a pointer to the last char put in buf (null). May return an error.
526 int kgdb_mem2hex(char *mem, char *buf, int count)
531 unsigned short mmr16;
533 int cpu = raw_smp_processor_id();
/* Reject ranges outside the debugger-accessible memory map. */
535 if (validate_memory_access_address((unsigned long)mem, count))
/*
 * We use the upper half of buf as an intermediate buffer for the
 * raw memory copy.  Hex conversion will work against this one.
 */
544 if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
/* 16-bit MMR: requires 2-byte alignment. */
547 if ((unsigned int)mem % 2 == 0) {
548 mmr16 = *(unsigned short *)mem;
549 pch = (unsigned char *)&mmr16;
/* 32-bit MMR: requires 4-byte alignment. */
557 if ((unsigned int)mem % 4 == 0) {
558 mmr32 = *(unsigned long *)mem;
559 pch = (unsigned char *)&mmr32;
571 } else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
572 (unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH
574 || cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
575 (unsigned int)(mem + count) <=
576 COREB_L1_CODE_START + L1_CODE_LENGTH
/* access L1 instruction SRAM*/
580 if (dma_memcpy(tmp, mem, count) == NULL)
583 err = probe_kernel_read(tmp, mem, count);
/* Hex-encode the raw copy into the caller's buffer. */
587 buf = pack_hex_byte(buf, *tmp);
/*
 * kgdb_ebin2mem - copy an escaped-binary gdb payload from @buf to @mem.
 * First un-escapes in place (0x7d escapes the next byte, which is
 * XORed with 0x20), then writes via the same three paths as
 * kgdb_mem2hex: aligned 16/32-bit access for MMRs, dma_memcpy() for L1
 * instruction SRAM, probe_kernel_write() otherwise.
 *
 * NOTE(review): lines were lost in extraction (embedded original line
 * numbers are discontinuous): local declarations (size, tmp pointers),
 * the loop's pointer advances and size accumulation, the MMR switch
 * framing, error returns and closing braces are missing.  Also note
 * the L1 upper-bound check here uses '<' where kgdb_mem2hex and
 * kgdb_hex2mem use '<=' -- an inconsistency worth confirming upstream.
 * Do not modify the logic from this partial view.
 */
599 * Copy the binary array pointed to by buf into mem. Fix $, #, and
600 * 0x7d escaped with 0x7d. Return a pointer to the character after
601 * the last byte written.
603 int kgdb_ebin2mem(char *buf, char *mem, int count)
607 unsigned short *mmr16;
608 unsigned long *mmr32;
611 int cpu = raw_smp_processor_id();
/* Un-escape the payload in place; 0x7d marks an escaped byte. */
613 tmp_old = tmp_new = buf;
615 while (count-- > 0) {
616 if (*tmp_old == 0x7d)
617 *tmp_new = *(++tmp_old) ^ 0x20;
/* Validate against the decoded length, not the escaped length. */
625 if (validate_memory_access_address((unsigned long)mem, size))
628 if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
/* 16-bit MMR write: requires 2-byte alignment. */
631 if ((unsigned int)mem % 2 == 0) {
632 mmr16 = (unsigned short *)buf;
633 *(unsigned short *)mem = *mmr16;
/* 32-bit MMR write: requires 4-byte alignment. */
638 if ((unsigned int)mem % 4 == 0) {
639 mmr32 = (unsigned long *)buf;
640 *(unsigned long *)mem = *mmr32;
647 } else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
648 (unsigned int)(mem + count) < L1_CODE_START + L1_CODE_LENGTH
650 || cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
651 (unsigned int)(mem + count) <=
652 COREB_L1_CODE_START + L1_CODE_LENGTH
/* access L1 instruction SRAM */
655 if (dma_memcpy(mem, buf, size) == NULL)
659 err = probe_kernel_write(mem, buf, size);
/*
 * kgdb_hex2mem - decode @count bytes of hex from @buf and write them to
 * @mem.  The decode works backwards so the raw bytes land in the upper
 * half of @buf without clobbering undecoded hex digits, then the write
 * uses the same three paths as the other accessors (aligned MMR access,
 * dma_memcpy() for L1 instruction SRAM, probe_kernel_write() otherwise).
 *
 * NOTE(review): lines were lost in extraction (embedded original line
 * numbers are discontinuous): the validation failure return, tmp_raw
 * pointer decrements in the decode loop, the MMR switch framing, error
 * returns, the #ifdef CONFIG_SMP guard and closing braces are missing.
 * Do not modify the logic from this partial view.
 */
665 * Convert the hex array pointed to by buf into binary to be placed in mem.
666 * Return a pointer to the character AFTER the last byte written.
667 * May return an error.
669 int kgdb_hex2mem(char *buf, char *mem, int count)
673 unsigned short *mmr16;
674 unsigned long *mmr32;
675 int cpu = raw_smp_processor_id();
/* Reject ranges outside the debugger-accessible memory map. */
677 if (validate_memory_access_address((unsigned long)mem, count))
/*
 * We use the upper half of buf as an intermediate buffer for the
 * raw memory that is converted from hex.
 */
684 tmp_raw = buf + count * 2;
/* Decode two hex digits per raw byte, walking backwards. */
686 tmp_hex = tmp_raw - 1;
687 while (tmp_hex >= buf) {
689 *tmp_raw = hex(*tmp_hex--);
690 *tmp_raw |= hex(*tmp_hex--) << 4;
693 if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
/* 16-bit MMR write: requires 2-byte alignment. */
696 if ((unsigned int)mem % 2 == 0) {
697 mmr16 = (unsigned short *)tmp_raw;
698 *(unsigned short *)mem = *mmr16;
/* 32-bit MMR write: requires 4-byte alignment. */
703 if ((unsigned int)mem % 4 == 0) {
704 mmr32 = (unsigned long *)tmp_raw;
705 *(unsigned long *)mem = *mmr32;
712 } else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
713 (unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH
715 || cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
716 (unsigned int)(mem + count) <=
717 COREB_L1_CODE_START + L1_CODE_LENGTH
/* access L1 instruction SRAM */
721 if (dma_memcpy(mem, tmp_raw, count) == NULL)
724 return probe_kernel_write(mem, tmp_raw, count);
728 int kgdb_validate_break_address(unsigned long addr)
730 int cpu = raw_smp_processor_id();
732 if (addr >= 0x1000 && (addr + BREAK_INSTR_SIZE) <= physical_mem_end)
734 if (addr >= ASYNC_BANK0_BASE
735 && addr + BREAK_INSTR_SIZE <= ASYNC_BANK3_BASE + ASYNC_BANK3_BASE)
737 #if L1_CODE_LENGTH != 0
738 if (cpu == 0 && addr >= L1_CODE_START
739 && addr + BREAK_INSTR_SIZE <= L1_CODE_START + L1_CODE_LENGTH)
742 else if (cpu == 1 && addr >= COREB_L1_CODE_START
743 && addr + BREAK_INSTR_SIZE <= COREB_L1_CODE_START + L1_CODE_LENGTH)
749 && addr + BREAK_INSTR_SIZE <= L2_START + L2_LENGTH)
756 int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
759 int cpu = raw_smp_processor_id();
761 if ((cpu == 0 && (unsigned int)addr >= L1_CODE_START
762 && (unsigned int)(addr + BREAK_INSTR_SIZE)
763 < L1_CODE_START + L1_CODE_LENGTH)
765 || (cpu == 1 && (unsigned int)addr >= COREB_L1_CODE_START
766 && (unsigned int)(addr + BREAK_INSTR_SIZE)
767 < COREB_L1_CODE_START + L1_CODE_LENGTH)
770 /* access L1 instruction SRAM */
771 if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
775 if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
776 BREAK_INSTR_SIZE) == NULL)
781 err = probe_kernel_read(saved_instr, (char *)addr,
786 return probe_kernel_write((char *)addr,
787 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
791 int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
793 if ((unsigned int)addr >= L1_CODE_START &&
794 (unsigned int)(addr + BREAK_INSTR_SIZE) <
795 L1_CODE_START + L1_CODE_LENGTH) {
796 /* access L1 instruction SRAM */
797 if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
802 return probe_kernel_write((char *)addr,
803 (char *)bundle, BREAK_INSTR_SIZE);
806 int kgdb_arch_init(void)
808 kgdb_single_step = 0;
810 bfin_remove_all_hw_break();
814 void kgdb_arch_exit(void)