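Make the ARM alignment trap handler endian-aware. The byte-by-byte
unaligned load/store helpers in arch/arm/mm/alignment.c
(__get16/__get32_unaligned_check and __put16/__put32_unaligned_check)
assumed a little-endian layout: the first byte read from the lowest
address was always treated as the least significant one, so a big-endian
(__ARMEB__) kernel got byte-swapped values. The patch adds a BE flag plus
FIRST_BYTE_16, FIRST_BYTE_32 and NEXT_BYTE macros so the shift and rotate
amounts follow the kernel's byte order.

For illustration only (not part of the kernel source; get32_bytewise is a
hypothetical name), the load-side logic reduces to the following userspace
C sketch, using the same shift selection as the patched
__get32_unaligned_check():

    /* Demo of the patched byte-assembly order; builds with any C compiler. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get32_bytewise(const uint8_t *p, int be)
    {
            /* First byte read is the MSB on big-endian, the LSB on little-endian. */
            uint32_t val = (uint32_t)p[0] << (be ? 24 : 0);
            val |= (uint32_t)p[1] << (be ? 16 : 8);
            val |= (uint32_t)p[2] << (be ? 8 : 16);
            val |= (uint32_t)p[3] << (be ? 0 : 24);
            return val;
    }

    int main(void)
    {
            const uint8_t bytes[4] = { 0xAA, 0xBB, 0xCC, 0xDD };

            printf("BE: 0x%08" PRIX32 "\n", get32_bytewise(bytes, 1)); /* 0xAABBCCDD */
            printf("LE: 0x%08" PRIX32 "\n", get32_bytewise(bytes, 0)); /* 0xDDCCBBAA */
            return 0;
    }

On the store side, FIRST_BYTE_16 ("ror #8") and FIRST_BYTE_32 ("ror #24")
rotate the most significant byte into the low byte before the first store,
and NEXT_BYTE ("ror #24" on ARMEB, "lsr #8" otherwise) brings each
following byte into position.
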
--- linux-2.6.10/arch/arm/mm/alignment.c.broken 2005-02-11 20:57:58.353668651 +0100
+++ linux-2.6.10/arch/arm/mm/alignment.c        2005-02-11 20:58:02.141660877 +0100
@@ -130,6 +130,18 @@
 #define TYPE_LDST      2
 #define TYPE_DONE      3
 
+#ifdef __ARMEB__
+#define BE             1
+#define FIRST_BYTE_16  "mov    %1, %1, ror #8\n"
+#define FIRST_BYTE_32  "mov    %1, %1, ror #24\n"
+#define NEXT_BYTE      "ror #24"
+#else
+#define BE             0
+#define FIRST_BYTE_16
+#define FIRST_BYTE_32
+#define NEXT_BYTE      "lsr #8"
+#endif
+
 #define __get8_unaligned_check(ins,val,addr,err)       \
        __asm__(                                        \
        "1:     "ins"   %1, [%2], #1\n"                 \
@@ -149,9 +161,10 @@
 #define __get16_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
-               __get8_unaligned_check(ins,val,a,err);          \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 8;                                  \
+               val =  v << ((BE) ? 8 : 0);                     \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val |= v << ((BE) ? 0 : 8);                     \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
@@ -165,13 +178,14 @@
 #define __get32_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v, a = addr;              \
-               __get8_unaligned_check(ins,val,a,err);          \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 8;                                  \
+               val =  v << ((BE) ? 24 :  0);                   \
+               __get8_unaligned_check(ins,v,a,err);            \
+               val |= v << ((BE) ? 16 :  8);                   \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 16;                                 \
+               val |= v << ((BE) ?  8 : 16);                   \
                __get8_unaligned_check(ins,v,a,err);            \
-               val |= v << 24;                                 \
+               val |= v << ((BE) ?  0 : 24);                   \
                if (err)                                        \
                        goto fault;                             \
        } while (0)
@@ -185,9 +199,9 @@
 #define __put16_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
-               __asm__(                                        \
+               __asm__( FIRST_BYTE_16                          \
                "1:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "2:     "ins"   %1, [%2]\n"                     \
                "3:\n"                                          \
                "       .section .fixup,\"ax\"\n"               \
@@ -215,13 +229,13 @@
 #define __put32_unaligned_check(ins,val,addr)                  \
        do {                                                    \
                unsigned int err = 0, v = val, a = addr;        \
-               __asm__(                                        \
+               __asm__( FIRST_BYTE_32                          \
                "1:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "2:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "3:     "ins"   %1, [%2], #1\n"                 \
-               "       mov     %1, %1, lsr #8\n"               \
+               "       mov     %1, %1, "NEXT_BYTE"\n"          \
                "4:     "ins"   %1, [%2]\n"                     \
                "5:\n"                                          \
                "       .section .fixup,\"ax\"\n"               \