 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 * it to save wrong values...  Be aware!

#include <asm/memory.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
#include <asm/thread_notify.h>

#include "entry-header.S"
 * Interrupt handling.  Preserves r7, r8, r9

1:      get_irqnr_and_base r0, r6, r5, lr
        @ routine called with r0 = irq number, r1 = struct pt_regs *
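        @ For reference, a C-level sketch of the handler this dispatches to
        @ (assuming the usual asm_do_IRQ entry point of this kernel series):
        @
        @       asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);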
         * this macro assumes that irqstat (r6) and base (r5) are
         * preserved from get_irqnr_and_base above
        test_for_ipi r0, r6, r5, lr
#ifdef CONFIG_LOCAL_TIMERS
        test_for_ltirq r0, r6, r5, lr

 * Invalid mode handlers

        .macro  inv_entry, reason
        sub     sp, sp, #S_FRAME_SIZE

        inv_entry BAD_PREFETCH

        inv_entry BAD_UNDEFINSTR

        @ XXX fall through to common_invalid

@ common_invalid - generic code for failed exception (re-entrant version of handlers)

        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
        sub     sp, sp, #S_FRAME_SIZE
 SPFIX( bicne   sp, sp, #4      )
        add     r5, sp, #S_SP           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""      ""       ""
        add     r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
 SPFIX( addne   r0, r0, #4      )
        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @ We are now ready to fill in the remaining blanks on the stack:
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)

        @ get ready to re-enable interrupts if appropriate
        biceq   r9, r9, #PSR_I_BIT

        @ Call the processor-specific abort handler:
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.

        @ set desired IRQ state, then call main handler

        @ IRQs off again before pulling preserved data off the stack

        @ restore SPSR and restart the instruction
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        add     r7, r8, #1                      @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        tst     r0, #_TIF_NEED_RESCHED
        ldr     r0, [tsk, #TI_PREEMPT]          @ read preempt value
        str     r8, [tsk, #TI_PREEMPT]          @ restore preempt count
        strne   r0, [r0, -r0]                   @ bug()
        ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
        bleq    trace_hardirqs_on
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
        teq     r8, #0                          @ was preempt count = 0
        ldreq   r6, .LCirq_stat
        ldr     r0, [r6, #4]                    @ local_irq_count
        ldr     r1, [r6, #8]                    @ local_bh_count
        mov     r7, #0                          @ preempt_schedule_irq
        str     r7, [tsk, #TI_PREEMPT]          @ expects preempt_count == 0
1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return                  @ go again

        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        mov     r0, sp                          @ struct pt_regs *regs

        @ IRQs off again before pulling preserved data off the stack

        @ restore SPSR and restart the instruction
        ldr     lr, [sp, #S_PSR]                @ Get SVC cpsr
        ldmia   sp, {r0 - pc}^                  @ Restore SVC registers

        @ re-enable interrupts if appropriate
        biceq   r9, r9, #PSR_I_BIT

        @ set args, then call main handler
        @  r0 - address of faulting instruction
        @  r1 - pointer to registers on stack
        mov     r0, r2                          @ address (pc)
        bl      do_PrefetchAbort                @ call abort handler

        @ IRQs off again before pulling preserved data off the stack

        @ restore SPSR and restart the instruction
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT

 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"

        sub     sp, sp, #S_FRAME_SIZE
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""     ""        ""
        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#warning "NPTL on non MMU needs fixing"
        @ make sure our user space atomic helper is aborted
        bichs   r3, r3, #PSR_Z_BIT

        @ We are now ready to fill in the remaining blanks on the stack:
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @ Also, separately save sp_usr and lr_usr

        @ Enable the alignment trap while in kernel mode

        @ Clear FP to mark the first stack frame

        @ Call the processor-specific abort handler:
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @ IRQs on, then call the main handler
        adr     lr, ret_from_exception

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        add     r7, r8, #1                      @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_PREEMPT]
        str     r8, [tsk, #TI_PREEMPT]
#ifdef CONFIG_TRACE_IRQFLAGS

        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        bne     fpundefinstr                    @ ignore FP

        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction
        adr     r9, ret_from_exception
        @ fallthrough to call_fpe

 * The out of line fixup for the ldrt above.

        .section .fixup, "ax"
        .section __ex_table,"a"

 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r10 = this thread's thread_info structure.
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000             @ mask out op-code bits
        teqne   r8, #0x0f000000                 @ SWI (ARM6/7 bug)?
        get_thread_info r10                     @ get current thread
        and     r8, r0, #0x00000f00             @ mask out CP number
        add     r6, r10, #TI_USED_CP
        strb    r7, [r6, r8, lsr #8]            @ set appropriate used_cp[]

        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable

        add     pc, pc, r8, lsr #6
        b       do_fpe                          @ CP#1 (FPE)
        b       do_fpe                          @ CP#2 (FPE)
        b       crunch_task_enable              @ CP#4 (MaverickCrunch)
        b       crunch_task_enable              @ CP#5 (MaverickCrunch)
        b       crunch_task_enable              @ CP#6 (MaverickCrunch)
        b       do_vfp                          @ CP#10 (VFP)
        b       do_vfp                          @ CP#11 (VFP)
        mov     pc, lr                          @ CP#10 (VFP)
        mov     pc, lr                          @ CP#11 (VFP)
        mov     pc, lr                          @ CP#14 (Debug)
        mov     pc, lr                          @ CP#15 (Control)

        add     r10, r10, #TI_FPSTATE           @ r10 = workspace
        ldr     pc, [r4]                        @ Call FP module USR entry point

 * The FP module is called with these registers set:
 *  r9  = normal "successful" return address
 *  lr  = unrecognised FP instruction return address

        adr     lr, ret_from_exception

        enable_irq                              @ Enable interrupts
        mov     r0, r2                          @ address (pc)
        bl      do_PrefetchAbort                @ call abort handler

 * This is the return code to user mode for abort handlers
ENTRY(ret_from_exception)
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
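 *
 * As a hedged C-level sketch, the scheduler in this kernel series reaches
 * this routine through the switch_to() macro, roughly:
 *
 *	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next));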
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
        stmia   ip!, {r4 - sl, fp, sp, lr}      @ Store most regs on stack
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
        strex   r5, r4, [ip]                    @ Clear exclusive monitor
#if defined(CONFIG_HAS_TLS_REG)
        mcr     p15, 0, r3, c13, c0, 3          @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
        str     r3, [r4, #-15]                  @ TLS val at 0xffff0ff0
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
        ldmia   r4, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
#ifdef CONFIG_ARM_THUMB

        .globl  __kuser_helper_start
__kuser_helper_start:
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 *	lr = return address (input)
 *	the Z flag might be lost (clobbered)
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
__kuser_memory_barrier:                         @ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
        mcr     p15, 0, r0, c7, c10, 5          @ dmb
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 *	lr = return address (input)
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 *    - This routine already includes memory barriers as needed.
 *
 *    - A failure might be transient, i.e. it is possible, although unlikely,
 *      that "failure" is returned even if *ptr == oldval; callers should
 *      simply retry (see the retry sketch after the atomic_add example below).
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
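 *
 * As a hedged follow-up to the transient-failure note above, a plain
 * compare-and-swap call is normally wrapped in a retry loop.  A minimal
 * sketch using the __kernel_cmpxchg definition above (the wrapper name is
 * only illustrative):
 *
 *	static inline void atomic_store_new(int *ptr, int newval)
 *	{
 *		int old;
 *		do {
 *			old = *ptr;
 *		} while (__kernel_cmpxchg(old, newval, ptr) != 0);
 *	}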
__kuser_cmpxchg:                                @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
         * Poor you.  No fast solution possible...
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
        mov     r7, #0xff00             @ 0xfff0 into r7 for EABI

#elif __LINUX_ARM_ARCH__ < 6
         * Theory of operation:
         *
         * We set the Z flag before loading oldval.  If ever an exception
         * occurs we can not be sure the loaded value will still be the same
         * when the exception returns, therefore the user exception handler
         * will clear the Z flag whenever the interrupted user code was
         * actually from the kernel address space (see the usr_entry macro).
         *
         * The post-increment on the str is used to prevent a race with an
         * exception happening just after the str instruction which would
         * clear the Z flag although the exchange was done.
1:      teq     ip, ip                  @ set Z flag
        ldr     ip, [r2]                @ load current val
        add     r3, r2, #1              @ prepare store ptr
        teqeq   ip, r0                  @ compare with oldval if still allowed
        streq   r1, [r3, #-1]!          @ store newval if still allowed
        subs    r0, r2, r3              @ if r2 == r3 the str occurred
#warning "NPTL on non MMU needs fixing"
        mcr     p15, 0, r0, c7, c10, 5          @ dmb
        mcr     p15, 0, r0, c7, c10, 5          @ dmb

 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *	lr = return address (input)
 *	the Z flag might be lost (clobbered)
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
__kuser_get_tls:                                @ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
        ldr     r0, [pc, #(16 - 8)]             @ TLS stored at 0xffff0ff0
        mrc     p15, 0, r0, c13, c0, 3          @ read TLS register
        .word   0                       @ pad up to __kuser_helper_version
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
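 *
 * A hedged usage sketch based on the definition above (treating version 2
 * as the minimum that provides the cmpxchg helper is only illustrative):
 *
 *	if (__kernel_helper_version < 2)
 *		abort();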
__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end

 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.

        .macro  vector_stub, name, mode, correction=0
        sub     lr, lr, #\correction

        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
        stmia   sp, {r0, lr}            @ save r0, lr
        str     lr, [sp, #8]            @ save spsr

        @ Prepare for SVC32 mode.  IRQs remain disabled.
        eor     r0, r0, #(\mode ^ SVC_MODE)

        @ the branch table must immediately follow this code
        ldr     lr, [pc, lr, lsl #2]
        movs    pc, lr                  @ branch to handler in SVC mode
 * Interrupt dispatcher
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid                   @  4
        .long   __irq_invalid                   @  5
        .long   __irq_invalid                   @  6
        .long   __irq_invalid                   @  7
        .long   __irq_invalid                   @  8
        .long   __irq_invalid                   @  9
        .long   __irq_invalid                   @  a
        .long   __irq_invalid                   @  b
        .long   __irq_invalid                   @  c
        .long   __irq_invalid                   @  d
        .long   __irq_invalid                   @  e
        .long   __irq_invalid                   @  f
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid                  @  4
        .long   __dabt_invalid                  @  5
        .long   __dabt_invalid                  @  6
        .long   __dabt_invalid                  @  7
        .long   __dabt_invalid                  @  8
        .long   __dabt_invalid                  @  9
        .long   __dabt_invalid                  @  a
        .long   __dabt_invalid                  @  b
        .long   __dabt_invalid                  @  c
        .long   __dabt_invalid                  @  d
        .long   __dabt_invalid                  @  e
        .long   __dabt_invalid                  @  f
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid                  @  4
        .long   __pabt_invalid                  @  5
        .long   __pabt_invalid                  @  6
        .long   __pabt_invalid                  @  7
        .long   __pabt_invalid                  @  8
        .long   __pabt_invalid                  @  9
        .long   __pabt_invalid                  @  a
        .long   __pabt_invalid                  @  b
        .long   __pabt_invalid                  @  c
        .long   __pabt_invalid                  @  d
        .long   __pabt_invalid                  @  e
        .long   __pabt_invalid                  @  f
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
        vector_stub     und, UND_MODE

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc                       @  3  (SVC_26 / SVC_32)
        .long   __und_invalid                   @  4
        .long   __und_invalid                   @  5
        .long   __und_invalid                   @  6
        .long   __und_invalid                   @  7
        .long   __und_invalid                   @  8
        .long   __und_invalid                   @  9
        .long   __und_invalid                   @  a
        .long   __und_invalid                   @  b
        .long   __und_invalid                   @  c
        .long   __und_invalid                   @  d
        .long   __und_invalid                   @  e
        .long   __und_invalid                   @  f
/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).

 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start

        .globl  __vectors_start
        b       vector_und + stubs_offset
        ldr     pc, .LCvswi + stubs_offset
        b       vector_pabt + stubs_offset
        b       vector_dabt + stubs_offset
        b       vector_addrexcptn + stubs_offset
        b       vector_irq + stubs_offset
        b       vector_fiq + stubs_offset

        .globl  __vectors_end

        .globl  cr_no_alignment