td2    .req    r5      @ save before use
 td3    .req    lr
 
-.zero:         mov     r0, sum
+.Lzero:                mov     r0, sum
                add     sp, sp, #4
                ldr     pc, [sp], #4
 
                 * Handle 0 to 7 bytes, with any alignment of source and
                 * destination pointers.  Note that when we get here, C = 0
                 */
-.less8:                teq     len, #0                 @ check for zero count
-               beq     .zero
+.Lless8:               teq     len, #0                 @ check for zero count
+               beq     .Lzero
 
                /* we must have at least one byte. */
                tst     buf, #1                 @ odd address?
                subne   len, len, #1
                adcnes  sum, sum, td0, put_byte_1
 
-.less4:                tst     len, #6
-               beq     .less8_byte
+.Lless4:               tst     len, #6
+               beq     .Lless8_byte
 
                /* we are now half-word aligned */
 
-.less8_wordlp:
+.Lless8_wordlp:
 #if __LINUX_ARM_ARCH__ >= 4
                ldrh    td0, [buf], #2
                sub     len, len, #2
 #endif
                adcs    sum, sum, td0
                tst     len, #6
-               bne     .less8_wordlp
+               bne     .Lless8_wordlp
 
-.less8_byte:   tst     len, #1                 @ odd number of bytes
+.Lless8_byte:  tst     len, #1                 @ odd number of bytes
                ldrneb  td0, [buf], #1          @ include last byte
                adcnes  sum, sum, td0, put_byte_0       @ update checksum
 
-.done:         adc     r0, sum, #0             @ collect up the last carry
+.Ldone:                adc     r0, sum, #0             @ collect up the last carry
                ldr     td0, [sp], #4
                tst     td0, #1                 @ check buffer alignment
                movne   r0, r0, ror #8          @ rotate checksum by 8 bits
                ldr     pc, [sp], #4            @ return
 
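A note on the epilogue: "adc r0, sum, #0" folds the final carry back into the 32-bit accumulator, which the caller still has to reduce to a 16-bit ones'-complement checksum. A minimal C sketch of that reduction (illustrative name, not the kernel's own helper):

	#include <stdint.h>

	static uint16_t fold_to_16(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half into the low half */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb any carry produced by the fold */
		return (uint16_t)sum;
	}
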
-.not_aligned:  tst     buf, #1                 @ odd address
+.Lnot_aligned: tst     buf, #1                 @ odd address
                ldrneb  td0, [buf], #1          @ make even
                subne   len, len, #1
                adcnes  sum, sum, td0, put_byte_1       @ update checksum
 ENTRY(csum_partial)
                stmfd   sp!, {buf, lr}
                cmp     len, #8                 @ Ensure that we have at least
-               blo     .less8                  @ 8 bytes to copy.
+               blo     .Lless8                 @ 8 bytes to copy.
 
                tst     buf, #1
                movne   sum, sum, ror #8
 
                adds    sum, sum, #0            @ C = 0
                tst     buf, #3                 @ Test destination alignment
-               blne    .not_aligned            @ aligh destination, return here
+               blne    .Lnot_aligned           @ align destination, return here
 
 1:             bics    ip, len, #31
                beq     3f
                ldmfd   sp!, {r4 - r5}
 
 3:             tst     len, #0x1c              @ should not change C
-               beq     .less4
+               beq     .Lless4
 
 4:             ldr     td0, [buf], #4
                sub     len, len, #4
                adcs    sum, sum, td0
                tst     len, #0x1c
                bne     4b
-               b       .less4
+               b       .Lless4
 
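The ror #8 at entry and again in .Ldone compensates for an odd start address. It leans on the RFC 1071 observation that pairing the same bytes in the opposite byte order merely byte-swaps the 16-bit ones'-complement sum, so rotating the accumulator by 8 bits before and after the loop gives the caller the byte lanes it expects. A self-contained C sketch of that property (even length assumed, names illustrative):

	#include <stdint.h>
	#include <stddef.h>

	static uint16_t csum16(const uint8_t *p, size_t len, int swapped)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += swapped ? (uint32_t)(p[i + 1] << 8 | p[i])
				       : (uint32_t)(p[i] << 8 | p[i + 1]);
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
		return (uint16_t)sum;	/* swapped == 1 yields the byte swap of swapped == 0 */
	}
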
 len    .req    r2
 sum    .req    r3
 
-.zero:         mov     r0, sum
+.Lzero:                mov     r0, sum
                load_regs       ea
 
                /*
                 * the length.  Note that the source pointer hasn't been
                 * aligned yet.
                 */
-.dst_unaligned:        tst     dst, #1
-               beq     .dst_16bit
+.Ldst_unaligned:
+               tst     dst, #1
+               beq     .Ldst_16bit
 
                load1b  ip
                sub     len, len, #1
                tst     dst, #2
                moveq   pc, lr                  @ dst is now 32bit aligned
 
-.dst_16bit:    load2b  r8, ip
+.Ldst_16bit:   load2b  r8, ip
                sub     len, len, #2
                adcs    sum, sum, r8, put_byte_0
                strb    r8, [dst], #1
                 * Handle 0 to 7 bytes, with any alignment of source and
                 * destination pointers.  Note that when we get here, C = 0
                 */
-.less8:                teq     len, #0                 @ check for zero count
-               beq     .zero
+.Lless8:       teq     len, #0                 @ check for zero count
+               beq     .Lzero
 
                /* we must have at least one byte. */
                tst     dst, #1                 @ dst 16-bit aligned
-               beq     .less8_aligned
+               beq     .Lless8_aligned
 
                /* Align dst */
                load1b  ip
                adcs    sum, sum, ip, put_byte_1        @ update checksum
                strb    ip, [dst], #1
                tst     len, #6
-               beq     .less8_byteonly
+               beq     .Lless8_byteonly
 
 1:             load2b  r8, ip
                sub     len, len, #2
                strb    r8, [dst], #1
                adcs    sum, sum, ip, put_byte_1
                strb    ip, [dst], #1
-.less8_aligned:        tst     len, #6
+.Lless8_aligned:
+               tst     len, #6
                bne     1b
-.less8_byteonly:
+.Lless8_byteonly:
                tst     len, #1
-               beq     .done
+               beq     .Ldone
                load1b  r8
                adcs    sum, sum, r8, put_byte_0        @ update checksum
                strb    r8, [dst], #1
-               b       .done
+               b       .Ldone
 
 FN_ENTRY
                mov     ip, sp
                sub     fp, ip, #4
 
                cmp     len, #8                 @ Ensure that we have at least
-               blo     .less8                  @ 8 bytes to copy.
+               blo     .Lless8                 @ 8 bytes to copy.
 
                adds    sum, sum, #0            @ C = 0
                tst     dst, #3                 @ Test destination alignment
-               blne    .dst_unaligned          @ align destination, return here
+               blne    .Ldst_unaligned         @ align destination, return here
 
                /*
                 * Ok, the dst pointer is now 32bit aligned, and we know
                 */
 
                tst     src, #3                 @ Test source alignment
-               bne     .src_not_aligned
+               bne     .Lsrc_not_aligned
 
                /* Routine for src & dst aligned */
 
                adcs    sum, sum, r4
 
 4:             ands    len, len, #3
-               beq     .done
+               beq     .Ldone
                load1l  r4
                tst     len, #2
                mov     r5, r4, get_byte_0
-               beq     .exit
+               beq     .Lexit
                adcs    sum, sum, r4, push #16
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_1
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_2
-.exit:         tst     len, #1
+.Lexit:                tst     len, #1
                strneb  r5, [dst], #1
                andne   r5, r5, #255
                adcnes  sum, sum, r5, put_byte_0
                 * the inefficient byte manipulations in the
                 * architecture independent code.
                 */
-.done:         adc     r0, sum, #0
+.Ldone:                adc     r0, sum, #0
                ldr     sum, [sp, #0]           @ dst
                tst     sum, #1
                movne   r0, r0, ror #8
                load_regs       ea
 
-.src_not_aligned:
+.Lsrc_not_aligned:
                adc     sum, sum, #0            @ include C from dst alignment
                and     ip, src, #3
                bic     src, src, #3
                load1l  r5
                cmp     ip, #2
-               beq     .src2_aligned
-               bhi     .src3_aligned
+               beq     .Lsrc2_aligned
+               bhi     .Lsrc3_aligned
                mov     r4, r5, pull #8         @ C = 0
                bics    ip, len, #15
                beq     2f
                adcs    sum, sum, r4
                mov     r4, r5, pull #8
 4:             ands    len, len, #3
-               beq     .done
+               beq     .Ldone
                mov     r5, r4, get_byte_0
                tst     len, #2
-               beq     .exit
+               beq     .Lexit
                adcs    sum, sum, r4, push #16
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_1
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_2
-               b       .exit
+               b       .Lexit
 
-.src2_aligned: mov     r4, r5, pull #16
+.Lsrc2_aligned:        mov     r4, r5, pull #16
                adds    sum, sum, #0
                bics    ip, len, #15
                beq     2f
                adcs    sum, sum, r4
                mov     r4, r5, pull #16
 4:             ands    len, len, #3
-               beq     .done
+               beq     .Ldone
                mov     r5, r4, get_byte_0
                tst     len, #2
-               beq     .exit
+               beq     .Lexit
                adcs    sum, sum, r4
                strb    r5, [dst], #1
                mov     r5, r4, get_byte_1
                strb    r5, [dst], #1
                tst     len, #1
-               beq     .done
+               beq     .Ldone
                load1b  r5
-               b       .exit
+               b       .Lexit
 
-.src3_aligned: mov     r4, r5, pull #24
+.Lsrc3_aligned:        mov     r4, r5, pull #24
                adds    sum, sum, #0
                bics    ip, len, #15
                beq     2f
                adcs    sum, sum, r4
                mov     r4, r5, pull #24
 4:             ands    len, len, #3
-               beq     .done
+               beq     .Ldone
                mov     r5, r4, get_byte_0
                tst     len, #2
-               beq     .exit
+               beq     .Lexit
                strb    r5, [dst], #1
                adcs    sum, sum, r4
                load1l  r4
                strb    r5, [dst], #1
                adcs    sum, sum, r4, push #24
                mov     r5, r4, get_byte_1
-               b       .exit
+               b       .Lexit
 
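In the .Lsrc_not_aligned paths above, the source is only ever touched with aligned 32-bit loads; each output word is then stitched together from two neighbouring loads using the pull/push shifts. A little-endian C sketch of that merge, with off being the source misalignment (1..3) and the names purely illustrative:

	#include <stdint.h>

	static uint32_t merge_unaligned(uint32_t lo, uint32_t hi, unsigned int off)
	{
		/* lo and hi are the two aligned words straddling the wanted word */
		return (lo >> (8 * off)) | (hi << (32 - 8 * off));
	}
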
 #include <asm/assembler.h>
                .text
 
-LC0:           .word   loops_per_jiffy
+.LC0:          .word   loops_per_jiffy
 
 /*
  * 0 <= r0 <= 2000
                orr     r2, r2, #0x00db
                mul     r0, r2, r0
 ENTRY(__const_udelay)                          @ 0 <= r0 <= 0x01ffffff
-               ldr     r2, LC0
+               ldr     r2, .LC0
                ldr     r2, [r2]                @ max = 0x0fffffff
                mov     r0, r0, lsr #11         @ max = 0x00003fff
                mov     r2, r2, lsr #11         @ max = 0x0003ffff
 
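__udelay scales its argument and falls through into __const_udelay, whose shift/multiply sequence is fixed-point arithmetic for roughly the relation sketched below; the lsr #11 pre-shifts keep the product within 32 bits. This is only an illustration of the target relation, with HZ = 100 assumed:

	#include <stdint.h>

	static unsigned long usecs_to_loops(unsigned long usecs,
					    unsigned long loops_per_jiffy)
	{
		const unsigned int hz = 100;	/* assumed tick rate for this sketch */

		/* loops = usecs * loops_per_jiffy * HZ / 10^6 */
		return (unsigned long)(((uint64_t)usecs * loops_per_jiffy * hz) / 1000000);
	}
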
                mov     r2, #0
 1:             ldrb    r3, [r0, r2, lsr #3]
                eors    r3, r3, #0xff           @ invert bits
-               bne     .found                  @ any now set - found zero bit
+               bne     .L_found                @ any now set - found zero bit
                add     r2, r2, #8              @ next bit pointer
 2:             cmp     r2, r1                  @ any more?
                blo     1b
                ldrb    r3, [r0, r2, lsr #3]
                eor     r3, r3, #0xff           @ now looking for a 1 bit
                movs    r3, r3, lsr ip          @ shift off unused bits
-               bne     .found
+               bne     .L_found
                orr     r2, r2, #7              @ if zero, then no bits here
                add     r2, r2, #1              @ align bit pointer
                b       2b                      @ loop for next bit
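
The zero-bit search above works a byte at a time: load a byte, invert it, and any bit that is now set marks a zero bit in the bitmap, at which point .L_found turns it into a bit number. A C sketch of the same idea (not the kernel API; nbits assumed to be a multiple of 8, GCC's __builtin_ctz standing in for the .L_found code):

	static unsigned long find_first_zero_bit_sketch(const unsigned char *bitmap,
							unsigned long nbits)
	{
		unsigned long bit;

		for (bit = 0; bit < nbits; bit += 8) {
			unsigned int b = (unsigned char)~bitmap[bit >> 3];

			if (b)			/* some bit in this byte is zero */
				return bit + (unsigned long)__builtin_ctz(b);
		}
		return nbits;			/* nothing found: report the limit */
	}
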
                mov     r2, #0
 1:             ldrb    r3, [r0, r2, lsr #3]
                movs    r3, r3
-               bne     .found                  @ any now set - found zero bit
+               bne     .L_found                @ any bits set - found one
                add     r2, r2, #8              @ next bit pointer
 2:             cmp     r2, r1                  @ any more?
                blo     1b
                beq     1b                      @ If new byte, goto old routine
                ldrb    r3, [r0, r2, lsr #3]
                movs    r3, r3, lsr ip          @ shift off unused bits
-               bne     .found
+               bne     .L_found
                orr     r2, r2, #7              @ if zero, then no bits here
                add     r2, r2, #1              @ align bit pointer
                b       2b                      @ loop for next bit
 1:             eor     r3, r2, #0x18           @ big endian byte ordering
                ldrb    r3, [r0, r3, lsr #3]
                eors    r3, r3, #0xff           @ invert bits
-               bne     .found                  @ any now set - found zero bit
+               bne     .L_found                @ any now set - found zero bit
                add     r2, r2, #8              @ next bit pointer
 2:             cmp     r2, r1                  @ any more?
                blo     1b
                ldrb    r3, [r0, r3, lsr #3]
                eor     r3, r3, #0xff           @ now looking for a 1 bit
                movs    r3, r3, lsr ip          @ shift off unused bits
-               bne     .found
+               bne     .L_found
                orr     r2, r2, #7              @ if zero, then no bits here
                add     r2, r2, #1              @ align bit pointer
                b       2b                      @ loop for next bit
 1:             eor     r3, r2, #0x18           @ big endian byte ordering
                ldrb    r3, [r0, r3, lsr #3]
                movs    r3, r3
-               bne     .found                  @ any now set - found zero bit
+               bne     .L_found                @ any bits set - found one
                add     r2, r2, #8              @ next bit pointer
 2:             cmp     r2, r1                  @ any more?
                blo     1b
                eor     r3, r2, #0x18           @ big endian byte ordering
                ldrb    r3, [r0, r3, lsr #3]
                movs    r3, r3, lsr ip          @ shift off unused bits
-               bne     .found
+               bne     .L_found
                orr     r2, r2, #7              @ if zero, then no bits here
                add     r2, r2, #1              @ align bit pointer
                b       2b                      @ loop for next bit
 /*
  * One or more bits in the LSB of r3 are assumed to be set.
  */
-.found:
+.L_found:
 #if __LINUX_ARM_ARCH__ >= 5
                rsb     r1, r3, #0
                and     r3, r3, r1
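
The rsb/and pair is the classic x & -x idiom: in -x every bit above the lowest set bit of x is flipped, so the AND leaves only that bit, which a clz can then convert into a bit index on ARMv5. The same trick in C (x must be non-zero; GCC's __builtin_clz stands in for the clz instruction):

	static inline unsigned int lowest_set_bit_index(unsigned int x)
	{
		unsigned int lsb = x & -x;	/* keep only the lowest set bit */

		return 31u - (unsigned int)__builtin_clz(lsb);	/* bit number of that bit */
	}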