]> pilppa.org Git - familiar-h63xx-build.git/blob - org.handhelds.familiar/packages/glibc/glibc-2.3.2/50_glibc232-hppa-full-nptl-2003-10-22.patch
OE tree imported from monotone branch org.openembedded.oz354fam083 at revision 8b12e3...
[familiar-h63xx-build.git] / org.handhelds.familiar / packages / glibc / glibc-2.3.2 / 50_glibc232-hppa-full-nptl-2003-10-22.patch
1 --- glibc-2.3.2-orig-debian/elf/dynamic-link.h  2003-10-22 01:06:09.000000000 -0400
2 +++ glibc-2.3.2/elf/dynamic-link.h      2003-10-22 01:11:53.000000000 -0400
3 @@ -39,12 +39,21 @@
4  elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
5                   const ElfW(Sym) *sym, const struct r_found_version *version,
6                   void *const reloc_addr);
7 +# if ELF_MACHINE_REL_RELATIVE_NEEDSLINKMAP
8 +auto void __attribute__((always_inline))
9 +elf_machine_rel_relative (struct link_map *map, ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
10 +                         void *const reloc_addr);
11 +auto void __attribute__((always_inline))
12 +elf_machine_rela_relative (struct link_map *map, ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
13 +                         void *const reloc_addr);
14 +# else
15  auto void __attribute__((always_inline))
16  elf_machine_rel_relative (ElfW(Addr) l_addr, const ElfW(Rel) *reloc,
17                           void *const reloc_addr);
18  auto void __attribute__((always_inline))
19  elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
20                            void *const reloc_addr);
21 +# endif
22  # if ELF_MACHINE_NO_RELA || defined ELF_MACHINE_PLT_REL
23  auto void __attribute__((always_inline))
24  elf_machine_lazy_rel (struct link_map *map,
25 --- glibc-2.3.2-orig-debian/linuxthreads/attr.c 2003-10-22 01:06:10.000000000 -0400
26 +++ glibc-2.3.2/linuxthreads/attr.c     2003-10-22 01:07:38.000000000 -0400
27 @@ -450,12 +450,19 @@
28                 {
29                   /* Found the entry.  Now we have the info we need.  */
30                   attr->__stacksize = rl.rlim_cur;
31 +#ifdef _STACK_GROWS_DOWN
32                   attr->__stackaddr = (void *) to;
33 -
34 +#else
35 +                  attr->__stackaddr = (void *) from;
36 +#endif
37                   /* The limit might be too high.  This is a bogus
38                      situation but try to avoid making it worse.  */
39                   if ((size_t) attr->__stacksize > (size_t) attr->__stackaddr)
40 +#ifdef _STACK_GROWS_DOWN
41                     attr->__stacksize = (size_t) attr->__stackaddr;
42 +#else
43 +                    attr->__stacksize = (size_t)(to - from);
44 +#endif
45  
46                   /* We succeed and no need to look further.  */
47                   ret = 0;
48 --- glibc-2.3.2-orig-debian/linuxthreads/descr.h        2003-10-22 01:06:10.000000000 -0400
49 +++ glibc-2.3.2/linuxthreads/descr.h    2003-10-22 01:07:38.000000000 -0400
50 @@ -71,7 +71,7 @@
51  /* Atomic counter made possible by compare_and_swap */
52  struct pthread_atomic {
53    long p_count;
54 -  int p_spinlock;
55 +  __atomic_lock_t p_spinlock;
56  };
57  
58  
59 --- glibc-2.3.2-orig-debian/linuxthreads/manager.c      2003-10-22 01:06:10.000000000 -0400
60 +++ glibc-2.3.2/linuxthreads/manager.c  2003-10-22 01:07:38.000000000 -0400
61 @@ -70,8 +70,13 @@
62  #else
63  static inline pthread_descr thread_segment(int seg)
64  {
65 +# ifdef _STACK_GROWS_UP
66 +  return (pthread_descr)(THREAD_STACK_START_ADDRESS + (seg - 1) * STACK_SIZE)
67 +         + 1;
68 +# else
69    return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
70           - 1;
71 +# endif
72  }
73  #endif
74  
75 --- glibc-2.3.2-orig-debian/linuxthreads/oldsemaphore.c 2003-10-22 01:03:57.000000000 -0400
76 +++ glibc-2.3.2/linuxthreads/oldsemaphore.c     2003-10-22 01:07:38.000000000 -0400
77 @@ -31,7 +31,7 @@
78  
79  typedef struct {
80      long int sem_status;
81 -    int sem_spinlock;
82 +    __atomic_lock_t sem_spinlock;
83  } old_sem_t;
84  
85  extern int __old_sem_init (old_sem_t *__sem, int __pshared, unsigned int __value);
86 --- glibc-2.3.2-orig-debian/linuxthreads/pt-machine.c   2003-10-22 01:03:57.000000000 -0400
87 +++ glibc-2.3.2/linuxthreads/pt-machine.c       2003-10-22 01:07:38.000000000 -0400
88 @@ -19,7 +19,9 @@
89  
90  #define PT_EI
91  
92 -extern long int testandset (int *spinlock);
93 +#include <pthread.h>
94 +
95 +extern long int testandset (__atomic_lock_t *spinlock);
96  extern int __compare_and_swap (long int *p, long int oldval, long int newval);
97  
98  #include <pt-machine.h>
99 --- glibc-2.3.2-orig-debian/linuxthreads/pthread.c      2003-10-22 01:06:16.000000000 -0400
100 +++ glibc-2.3.2/linuxthreads/pthread.c  2003-10-22 01:07:38.000000000 -0400
101 @@ -300,9 +300,9 @@
102    pthread_descr self;
103  
104    /* First of all init __pthread_handles[0] and [1] if needed.  */
105 -# if __LT_SPINLOCK_INIT != 0
106 -  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
107 -  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
108 +# ifdef __LT_INITIALIZER_NOT_ZERO
109 +  __pthread_handles[0].h_lock = __LOCK_ALT_INITIALIZER;
110 +  __pthread_handles[1].h_lock = __LOCK_ALT_INITIALIZER;
111  # endif
112  # ifndef SHARED
113    /* Unlike in the dynamically linked case the dynamic linker has not
114 @@ -370,7 +370,7 @@
115  # endif
116    /* self->p_start_args need not be initialized, it's all zero.  */
117    self->p_userstack = 1;
118 -# if __LT_SPINLOCK_INIT != 0
119 +# ifdef __LT_INITIALIZER_NOT_ZERO 
120    self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
121  # endif
122    self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
123 @@ -384,9 +384,9 @@
124  #else  /* USE_TLS */
125  
126    /* First of all init __pthread_handles[0] and [1].  */
127 -# if __LT_SPINLOCK_INIT != 0
128 -  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
129 -  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
130 +# ifdef __LT_INITIALIZER_NOT_ZERO
131 +  __pthread_handles[0].h_lock = __LOCK_ALT_INITIALIZER;
132 +  __pthread_handles[1].h_lock = __LOCK_ALT_INITIALIZER;
133  # endif
134    __pthread_handles[0].h_descr = &__pthread_initial_thread;
135    __pthread_handles[1].h_descr = &__pthread_manager_thread;
136 @@ -893,7 +893,11 @@
137    /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
138       the manager threads handled specially in thread_self(), so start at 2 */
139    h = __pthread_handles + 2;
140 +# ifdef _STACK_GROWS_UP
141 +  while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr)) h++;
142 +# else
143    while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
144 +# endif
145    return h->h_descr;
146  }
147  
148 @@ -908,11 +912,22 @@
149      return manager_thread;
150    h = __pthread_handles + 2;
151  # ifdef USE_TLS
152 +#  ifdef _STACK_GROWS_UP
153 +  while (h->h_descr == NULL
154 +        || ! (sp >= h->h_descr->p_stackaddr && 
155 +              sp < h->h_descr->p_guardaddr))
156 +#  else
157    while (h->h_descr == NULL
158 -        || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
159 +        || ! (sp <= h->h_descr->p_stackaddr && 
160 +              sp >= h->h_bottom))
161 +#  endif
162      h++;
163  # else
164 +#  ifdef _STACK_GROWS_UP
165 +  while (! (sp >= (char *) h->h_descr && sp < h->h_descr->p_guardaddr))
166 +#  else
167    while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
168 +#  endif
169      h++;
170  # endif
171    return h->h_descr;
172 --- glibc-2.3.2-orig-debian/linuxthreads/spinlock.c     2003-10-22 01:03:57.000000000 -0400
173 +++ glibc-2.3.2/linuxthreads/spinlock.c 2003-10-22 01:07:38.000000000 -0400
174 @@ -24,9 +24,9 @@
175  #include "spinlock.h"
176  #include "restart.h"
177  
178 -static void __pthread_acquire(int * spinlock);
179 +static void __pthread_acquire(__atomic_lock_t * spinlock);
180  
181 -static inline void __pthread_release(int * spinlock)
182 +static inline void __pthread_release(__atomic_lock_t * spinlock)
183  {
184    WRITE_MEMORY_BARRIER();
185    *spinlock = __LT_SPINLOCK_INIT;
186 @@ -269,11 +269,11 @@
187  struct wait_node {
188    struct wait_node *next;      /* Next node in null terminated linked list */
189    pthread_descr thr;           /* The thread waiting with this node */
190 -  int abandoned;               /* Atomic flag */
191 +  __atomic_lock_t abandoned;   /* Atomic flag */
192  };
193  
194  static long wait_node_free_list;
195 -static int wait_node_free_list_spinlock;
196 +__pthread_lock_define_initialized(static, wait_node_free_list_spinlock);
197  
198  /* Allocate a new node from the head of the free list using an atomic
199     operation, or else using malloc if that list is empty.  A fundamental
200 @@ -376,7 +376,7 @@
201        if (self == NULL)
202         self = thread_self();
203  
204 -      wait_node.abandoned = 0;
205 +      wait_node.abandoned = __LT_SPINLOCK_INIT;
206        wait_node.next = (struct wait_node *) lock->__status;
207        wait_node.thr = self;
208        lock->__status = (long) &wait_node;
209 @@ -402,7 +402,7 @@
210        wait_node.thr = self;
211        newstatus = (long) &wait_node;
212      }
213 -    wait_node.abandoned = 0;
214 +    wait_node.abandoned = __LT_SPINLOCK_INIT;
215      wait_node.next = (struct wait_node *) oldstatus;
216      /* Make sure the store in wait_node.next completes before performing
217         the compare-and-swap */
218 @@ -451,7 +451,7 @@
219        if (self == NULL)
220         self = thread_self();
221  
222 -      p_wait_node->abandoned = 0;
223 +      p_wait_node->abandoned = __LT_SPINLOCK_INIT;
224        p_wait_node->next = (struct wait_node *) lock->__status;
225        p_wait_node->thr = self;
226        lock->__status = (long) p_wait_node;
227 @@ -474,7 +474,7 @@
228        p_wait_node->thr = self;
229        newstatus = (long) p_wait_node;
230      }
231 -    p_wait_node->abandoned = 0;
232 +    p_wait_node->abandoned = __LT_SPINLOCK_INIT;
233      p_wait_node->next = (struct wait_node *) oldstatus;
234      /* Make sure the store in wait_node.next completes before performing
235         the compare-and-swap */
236 @@ -574,7 +574,7 @@
237      while (p_node != (struct wait_node *) 1) {
238        int prio;
239  
240 -      if (p_node->abandoned) {
241 +      if (lock_held(&p_node->abandoned)) {
242         /* Remove abandoned node. */
243  #if defined TEST_FOR_COMPARE_AND_SWAP
244         if (!__pthread_has_cas)
245 @@ -662,7 +662,7 @@
246  #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
247  
248  int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
249 -                               int * spinlock)
250 +                               __atomic_lock_t * spinlock)
251  {
252    int res;
253  
254 @@ -699,7 +699,7 @@
255     - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
256       sched_yield(), then sleeping again if needed. */
257  
258 -static void __pthread_acquire(int * spinlock)
259 +static void __pthread_acquire(__atomic_lock_t * spinlock)      
260  {
261    int cnt = 0;
262    struct timespec tm;
263 --- glibc-2.3.2-orig-debian/linuxthreads/spinlock.h     2003-10-22 01:06:10.000000000 -0400
264 +++ glibc-2.3.2/linuxthreads/spinlock.h 2003-10-22 01:07:38.000000000 -0400
265 @@ -33,14 +33,28 @@
266  #endif
267  #endif
268  
269 +/* Define lock_held for all arches that don't need a modified copy. */
270 +#ifndef __LT_INITIALIZER_NOT_ZERO
271 +# define lock_held(p) *(p)
272 +#endif
273 +
274 +/* Initializers for possibly complex structures */
275 +#ifdef __LT_INITIALIZER_NOT_ZERO
276 +# define __pthread_lock_define_initialized(CLASS,NAME) \
277 +       CLASS __atomic_lock_t NAME = __LT_SPINLOCK_ALT_INIT
278 +#else
279 +# define __pthread_lock_define_initialized(CLASS,NAME) \
280 +       CLASS __atomic_lock_t NAME
281 +#endif
282 +
283  #if defined(TEST_FOR_COMPARE_AND_SWAP)
284  
285  extern int __pthread_has_cas;
286  extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
287 -                                      int * spinlock);
288 +                                      __atomic_lock_t * spinlock);
289  
290  static inline int compare_and_swap(long * ptr, long oldval, long newval,
291 -                                   int * spinlock)
292 +                                   __atomic_lock_t * spinlock)
293  {
294    if (__builtin_expect (__pthread_has_cas, 1))
295      return __compare_and_swap(ptr, oldval, newval);
296 @@ -58,7 +72,7 @@
297  
298  static inline int
299  compare_and_swap_with_release_semantics (long * ptr, long oldval,
300 -                                        long newval, int * spinlock)
301 +                                        long newval, __atomic_lock_t * spinlock)
302  {
303    return __compare_and_swap_with_release_semantics (ptr, oldval,
304                                                     newval);
305 @@ -67,7 +81,7 @@
306  #endif
307  
308  static inline int compare_and_swap(long * ptr, long oldval, long newval,
309 -                                   int * spinlock)
310 +                                   __atomic_lock_t * spinlock)
311  {
312    return __compare_and_swap(ptr, oldval, newval);
313  }
314 @@ -75,10 +89,10 @@
315  #else
316  
317  extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
318 -                                      int * spinlock);
319 +                                      __atomic_lock_t * spinlock);
320  
321  static inline int compare_and_swap(long * ptr, long oldval, long newval,
322 -                                   int * spinlock)
323 +                                   __atomic_lock_t * spinlock)
324  {
325    return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
326  }
327 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/hppa/pspinlock.c       2003-10-22 01:03:57.000000000 -0400
328 +++ glibc-2.3.2/linuxthreads/sysdeps/hppa/pspinlock.c   2003-10-22 01:07:38.000000000 -0400
329 @@ -24,13 +24,10 @@
330  int
331  __pthread_spin_lock (pthread_spinlock_t *lock)
332  {
333 -  unsigned int val;
334 +  volatile unsigned int *addr = __ldcw_align (lock);
335  
336 -  do
337 -    asm volatile ("ldcw %1,%0"
338 -                 : "=r" (val), "=m" (*lock)
339 -                 : "m" (*lock));
340 -  while (!val);
341 +  while (__ldcw (addr) == 0)
342 +    while (*addr == 0) ;
343  
344    return 0;
345  }
346 @@ -40,13 +37,9 @@
347  int
348  __pthread_spin_trylock (pthread_spinlock_t *lock)
349  {
350 -  unsigned int val;
351 +  volatile unsigned int *a = __ldcw_align (lock);
352  
353 -  asm volatile ("ldcw %1,%0"
354 -               : "=r" (val), "=m" (*lock)
355 -               : "m" (*lock));
356 -
357 -  return val ? 0 : EBUSY;
358 +  return __ldcw (a) ? 0 : EBUSY;
359  }
360  weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
361  
362 @@ -54,7 +47,9 @@
363  int
364  __pthread_spin_unlock (pthread_spinlock_t *lock)
365  {
366 -  *lock = 1;
367 +  volatile unsigned int *a = __ldcw_align (lock);
368 +
369 +  *a = 1;
370    return 0;
371  }
372  weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
373 @@ -66,7 +61,9 @@
374    /* We can ignore the `pshared' parameter.  Since we are busy-waiting
375       all processes which can access the memory location `lock' points
376       to can use the spinlock.  */
377 -  *lock = 1;
378 +  volatile unsigned int *a = __ldcw_align (lock);
379 +
380 +  *a = 1;
381    return 0;
382  }
383  weak_alias (__pthread_spin_init, pthread_spin_init)
384 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/hppa/pt-machine.h      2003-10-22 01:06:10.000000000 -0400
385 +++ glibc-2.3.2/linuxthreads/sysdeps/hppa/pt-machine.h  2003-10-22 01:12:51.000000000 -0400
386 @@ -22,41 +22,97 @@
387  #ifndef _PT_MACHINE_H
388  #define _PT_MACHINE_H   1
389  
390 +#include <sys/types.h>
391  #include <bits/initspin.h>
392  
393  #ifndef PT_EI
394  # define PT_EI extern inline __attribute__ ((always_inline))
395  #endif
396  
397 -extern long int testandset (int *spinlock);
398 -extern int __compare_and_swap (long int *p, long int oldval, long int newval);
399 +extern inline long int testandset (__atomic_lock_t *spinlock);
400 +extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
401 +extern inline int lock_held (__atomic_lock_t *spinlock); 
402 +extern inline int __load_and_clear (__atomic_lock_t *spinlock);
403  
404  /* Get some notion of the current stack.  Need not be exactly the top
405     of the stack, just something somewhere in the current frame.  */
406  #define CURRENT_STACK_FRAME  stack_pointer
407  register char * stack_pointer __asm__ ("%r30");
408  
409 +/* Get/Set thread-specific pointer.  We have to call into the kernel to
410 + * modify it, but we can read it in user mode.  */
411 +
412 +#define THREAD_SELF __get_cr27()
413 +
414 +static inline struct _pthread_descr_struct * __get_cr27(void)
415 +{
416 +       long cr27;
417 +       asm("mfctl %%cr27, %0" : "=r" (cr27) : );
418 +       return (struct _pthread_descr_struct *) cr27;
419 +}
420 +
421 +#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
422 +
423 +static inline void __set_cr27(struct _pthread_descr_struct * cr27)
424 +{
425 +       asm(
426 +               "ble    0xe0(%%sr2, %%r0)\n\t"
427 +               "copy   %0, %%r26"
428 +        : : "r" (cr27) : "r26" );
429 +}
430 +
431 +/* We want the OS to assign stack addresses.  */
432 +#define FLOATING_STACKS        1
433 +#define ARCH_STACK_MAX_SIZE    8*1024*1024
434  
435  /* The hppa only has one atomic read and modify memory operation,
436     load and clear, so hppa spinlocks must use zero to signify that
437 -   someone is holding the lock.  */
438 +   someone is holding the lock.  The address used for the ldcw
439 +   semaphore must be 16-byte aligned.  */
440 +#define __ldcw(a) ({ \
441 +  unsigned int __ret;                                                  \
442 +  __asm__ __volatile__("ldcw 0(%2),%0"                                 \
443 +                      : "=r" (__ret), "=m" (*(a)) : "r" (a));          \
444 +  __ret;                                                               \
445 +})
446 +
447 +/* Because malloc only guarantees 8-byte alignment for malloc'd data,
448 +   and GCC only guarantees 8-byte alignment for stack locals, we can't
449 +   be assured of 16-byte alignment for atomic lock data even if we
450 +   specify "__attribute ((aligned(16)))" in the type declaration.  So,
451 +   we use a struct containing an array of four ints for the atomic lock
452 +   type and dynamically select the 16-byte aligned int from the array
453 +   for the semaphore.  */
454 +#define __PA_LDCW_ALIGNMENT 16
455 +#define __ldcw_align(a) ({ \
456 +  volatile unsigned int __ret = (unsigned int) a;                      \
457 +  if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a)         \
458 +    __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
459 +  (unsigned int *) __ret;                                              \
460 +})
461  
462 -#define xstr(s) str(s)
463 -#define str(s) #s
464  /* Spinlock implementation; required.  */
465 -PT_EI long int
466 -testandset (int *spinlock)
467 +PT_EI int
468 +__load_and_clear (__atomic_lock_t *spinlock)
469  {
470 -  int ret;
471 +  volatile unsigned int *a = __ldcw_align (spinlock);
472  
473 -  __asm__ __volatile__(
474 -       "ldcw 0(%2),%0"
475 -       : "=r"(ret), "=m"(*spinlock)
476 -       : "r"(spinlock));
477 +  return __ldcw (a);
478 +}
479  
480 -  return ret == 0;
481 +/* Emulate testandset */
482 +PT_EI long int
483 +testandset (__atomic_lock_t *spinlock)
484 +{
485 +  return (__load_and_clear(spinlock) == 0);
486  }
487 -#undef str
488 -#undef xstr
489  
490 +PT_EI int
491 +lock_held (__atomic_lock_t *spinlock)
492 +{
493 +  volatile unsigned int *a = __ldcw_align (spinlock);
494 +
495 +  return *a == 0;
496 +}
497 +               
498  #endif /* pt-machine.h */
499 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/hppa/pt-machine.h.rej  1969-12-31 19:00:00.000000000 -0500
500 +++ glibc-2.3.2/linuxthreads/sysdeps/hppa/pt-machine.h.rej      2003-10-22 01:07:38.000000000 -0400
501 @@ -0,0 +1,153 @@
502 +***************
503 +*** 1,6 ****
504 +  /* Machine-dependent pthreads configuration and inline functions.
505 +     hppa version.
506 +-    Copyright (C) 2000, 2002 Free Software Foundation, Inc.
507 +     This file is part of the GNU C Library.
508 +     Contributed by Richard Henderson <rth@tamu.edu>.
509 +  
510 +--- 1,6 ----
511 +  /* Machine-dependent pthreads configuration and inline functions.
512 +     hppa version.
513 ++    Copyright (C) 2000, 2002, 2003 Free Software Foundation, Inc.
514 +     This file is part of the GNU C Library.
515 +     Contributed by Richard Henderson <rth@tamu.edu>.
516 +  
517 +***************
518 +*** 22,62 ****
519 +  #ifndef _PT_MACHINE_H
520 +  #define _PT_MACHINE_H   1
521 +  
522 +  #include <bits/initspin.h>
523 +  
524 +  #ifndef PT_EI
525 +  # define PT_EI extern inline
526 +  #endif
527 +  
528 +- extern long int testandset (int *spinlock);
529 +- extern int __compare_and_swap (long int *p, long int oldval, long int newval);
530 +  
531 +  /* Get some notion of the current stack.  Need not be exactly the top
532 +     of the stack, just something somewhere in the current frame.  */
533 +  #define CURRENT_STACK_FRAME  stack_pointer
534 +  register char * stack_pointer __asm__ ("%r30");
535 +  
536 +- 
537 +  /* The hppa only has one atomic read and modify memory operation,
538 +     load and clear, so hppa spinlocks must use zero to signify that
539 +-    someone is holding the lock.  */
540 +- 
541 +- #define xstr(s) str(s)
542 +- #define str(s) #s
543 +  /* Spinlock implementation; required.  */
544 +  PT_EI long int
545 +- testandset (int *spinlock)
546 +  {
547 +-   int ret;
548 +- 
549 +-   __asm__ __volatile__(
550 +-        "ldcw 0(%2),%0"
551 +-        : "=r"(ret), "=m"(*spinlock)
552 +-        : "r"(spinlock));
553 +- 
554 +-   return ret == 0;
555 +  }
556 +- #undef str
557 +- #undef xstr
558 +- 
559 +  #endif /* pt-machine.h */
560 +--- 22,115 ----
561 +  #ifndef _PT_MACHINE_H
562 +  #define _PT_MACHINE_H   1
563 +  
564 ++ #include <sys/types.h>
565 +  #include <bits/initspin.h>
566 +  
567 +  #ifndef PT_EI
568 +  # define PT_EI extern inline
569 +  #endif
570 +  
571 ++ extern inline long int testandset (__atomic_lock_t *spinlock);
572 ++ extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
573 ++ extern inline int lock_held (__atomic_lock_t *spinlock); 
574 ++ extern inline int __load_and_clear (__atomic_lock_t *spinlock);
575 +  
576 +  /* Get some notion of the current stack.  Need not be exactly the top
577 +     of the stack, just something somewhere in the current frame.  */
578 +  #define CURRENT_STACK_FRAME  stack_pointer
579 +  register char * stack_pointer __asm__ ("%r30");
580 +  
581 ++ /* Get/Set thread-specific pointer.  We have to call into the kernel to
582 ++    modify it, but we can read it in user mode.  */
583 ++  
584 ++ #define THREAD_SELF __get_cr27()
585 ++  
586 ++ static inline struct _pthread_descr_struct * __get_cr27(void)
587 ++ {
588 ++   long cr27;
589 ++   asm("mfctl %%cr27, %0" : "=r" (cr27) : );
590 ++   return (struct _pthread_descr_struct *) cr27;
591 ++ }
592 ++  
593 ++ #define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
594 ++  
595 ++ static inline void __set_cr27(struct _pthread_descr_struct * cr27)
596 ++ {
597 ++   asm(       "       ble 0xe0(%%sr2, %%r0)\n"
598 ++      "       copy %0,%%r26"
599 ++      : : "r" (cr27) : "r26" );
600 ++ }
601 ++  
602 ++ /* We want the OS to assign stack addresses.  */
603 ++ #define FLOATING_STACKS      1
604 ++ #define ARCH_STACK_MAX_SIZE  8*1024*1024
605 ++  
606 +  /* The hppa only has one atomic read and modify memory operation,
607 +     load and clear, so hppa spinlocks must use zero to signify that
608 ++    someone is holding the lock.  The address used for the ldcw
609 ++    semaphore must be 16-byte aligned.  */
610 ++ #define __ldcw(a) ({ \
611 ++   unsigned int __ret;                                                        \
612 ++   __asm__ __volatile__("ldcw 0(%2),%0"                                       \
613 ++                       : "=r" (__ret), "=m" (*(a)) : "r" (a));                \
614 ++   __ret;                                                             \
615 ++ })
616 ++ 
617 ++ /* Because malloc only guarantees 8-byte alignment for malloc'd data,
618 ++    and GCC only guarantees 8-byte alignment for stack locals, we can't
619 ++    be assured of 16-byte alignment for atomic lock data even if we
620 ++    specify "__attribute ((aligned(16)))" in the type declaration.  So,
621 ++    we use a struct containing an array of four ints for the atomic lock
622 ++    type and dynamically select the 16-byte aligned int from the array
623 ++    for the semaphore.  */
624 ++ #define __PA_LDCW_ALIGNMENT 16
625 ++ #define __ldcw_align(a) ({ \
626 ++   volatile unsigned int __ret = (unsigned int) a;                    \
627 ++   if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a)               \
628 ++     __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
629 ++   (unsigned int *) __ret;                                            \
630 ++ })
631 ++   
632 +  /* Spinlock implementation; required.  */
633 ++ PT_EI int
634 ++ __load_and_clear (__atomic_lock_t *spinlock)
635 ++ {
636 ++   volatile unsigned int *a = __ldcw_align (spinlock);
637 ++   return __ldcw (a);
638 ++ }
639 ++   
640 ++ /* Emulate testandset */
641 +  PT_EI long int
642 ++ testandset (__atomic_lock_t *spinlock)
643 +  {
644 ++   return (__load_and_clear(spinlock) == 0);
645 +  }
646 ++   
647 ++ PT_EI int
648 ++ lock_held (__atomic_lock_t *spinlock)
649 ++ {
650 ++   volatile unsigned int *a = __ldcw_align (spinlock);
651 ++   return *a == 0;
652 ++ }
653 ++      
654 +  #endif /* pt-machine.h */
655 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/pthread/bits/libc-lock.h       2003-10-22 01:06:16.000000000 -0400
656 +++ glibc-2.3.2/linuxthreads/sysdeps/pthread/bits/libc-lock.h   2003-10-22 01:07:38.000000000 -0400
657 @@ -71,12 +71,12 @@
658     initialized locks must be set to one due to the lack of normal
659     atomic operations.) */
660  
661 -#if __LT_SPINLOCK_INIT == 0
662 +#ifdef __LT_INITIALIZER_NOT_ZERO
663  #  define __libc_lock_define_initialized(CLASS,NAME) \
664 -  CLASS __libc_lock_t NAME;
665 +  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
666  #else
667  #  define __libc_lock_define_initialized(CLASS,NAME) \
668 -  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
669 +  CLASS __libc_lock_t NAME;
670  #endif
671  
672  #define __libc_rwlock_define_initialized(CLASS,NAME) \
673 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h    2003-10-22 01:03:57.000000000 -0400
674 +++ glibc-2.3.2/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h        2003-10-22 01:07:38.000000000 -0400
675 @@ -22,12 +22,14 @@
676  #define __need_schedparam
677  #include <bits/sched.h>
678  
679 +typedef int __atomic_lock_t;
680 +
681  /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
682  struct _pthread_fastlock
683  {
684 -  long int __status;   /* "Free" or "taken" or head of waiting list */
685 -  int __spinlock;      /* Used by compare_and_swap emulation. Also,
686 -                         adaptive SMP lock stores spin count here. */
687 +  long int __status;           /* "Free" or "taken" or head of waiting list */
688 +  __atomic_lock_t __spinlock;  /* Used by compare_and_swap emulation. Also,
689 +                                  adaptive SMP lock stores spin count here. */
690  };
691  
692  #ifndef _PTHREAD_DESCR_DEFINED
693 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h   2003-10-22 01:03:57.000000000 -0400
694 +++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h       2003-10-22 01:07:38.000000000 -0400
695 @@ -19,9 +19,23 @@
696  
697  /* Initial value of a spinlock.  PA-RISC only implements atomic load
698     and clear so this must be non-zero. */
699 -#define __LT_SPINLOCK_INIT 1
700 +#define __LT_SPINLOCK_INIT ((__atomic_lock_t) { { 1, 1, 1, 1 } })
701 +
702 +/* Initialize global spinlocks without cast, generally macro wrapped */
703 +#define __LT_SPINLOCK_ALT_INIT { { 1, 1, 1, 1 } }
704 +
705 +/* Macros for lock initializers, not using the above definition.
706 +   The above definition is not used in the case that static initializers
707 +   use this value. */
708 +#define __LOCK_INITIALIZER { __LT_SPINLOCK_ALT_INIT, 0 }
709 +
710 +/* Used to initialize _pthread_fastlock's in non-static case */
711 +#define __LOCK_ALT_INITIALIZER ((struct _pthread_fastlock){ __LT_SPINLOCK_INIT, 0 })
712 +
713 +/* Used in pthread_atomic initialization */
714 +#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_ALT_INIT }
715 +
716 +/* Tell the rest of the code that the initializer is non-zero without
717 +   explaining its internal structure */
718 +#define __LT_INITIALIZER_NOT_ZERO
719  
720 -/* Macros for lock initializers, using the above definition. */
721 -#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
722 -#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
723 -#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
724 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h       1969-12-31 19:00:00.000000000 -0500
725 +++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h   2003-10-22 01:07:38.000000000 -0400
726 @@ -0,0 +1,160 @@
727 +/* Linuxthreads - a simple clone()-based implementation of Posix        */
728 +/* threads for Linux.                                                   */
729 +/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
730 +/*                                                                      */
731 +/* This program is free software; you can redistribute it and/or        */
732 +/* modify it under the terms of the GNU Library General Public License  */
733 +/* as published by the Free Software Foundation; either version 2       */
734 +/* of the License, or (at your option) any later version.               */
735 +/*                                                                      */
736 +/* This program is distributed in the hope that it will be useful,      */
737 +/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
738 +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
739 +/* GNU Library General Public License for more details.                 */
740 +
741 +#if !defined _BITS_TYPES_H && !defined _PTHREAD_H
742 +# error "Never include <bits/pthreadtypes.h> directly; use <sys/types.h> instead."
743 +#endif
744 +
745 +#ifndef _BITS_PTHREADTYPES_H
746 +#define _BITS_PTHREADTYPES_H   1
747 +
748 +#define __need_schedparam
749 +#include <bits/sched.h>
750 +
751 +/* We need 128-bit alignment for the ldcw semaphore.  At most, we are
752 +   assured of 64-bit alignment for stack locals and malloc'd data.  Thus,
753 +   we use a struct with four ints for the atomic lock type.  The locking
754 +   code will figure out which of the four to use for the ldcw semaphore.  */
755 +typedef volatile struct {
756 +  int lock[4];
757 +} __attribute__ ((aligned(16))) __atomic_lock_t;
758 +
759 +/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
760 +struct _pthread_fastlock
761 +{
762 +  __atomic_lock_t __spinlock;  /* Used by compare_and_swap emulation.  Also,
763 +                                  adaptive SMP lock stores spin count here. */
764 +  long int __status;           /* "Free" or "taken" or head of waiting list */
765 +};
766 +
767 +#ifndef _PTHREAD_DESCR_DEFINED
768 +/* Thread descriptors */
769 +typedef struct _pthread_descr_struct *_pthread_descr;
770 +# define _PTHREAD_DESCR_DEFINED
771 +#endif
772 +
773 +
774 +/* Attributes for threads.  */
775 +typedef struct __pthread_attr_s
776 +{
777 +  int __detachstate;
778 +  int __schedpolicy;
779 +  struct __sched_param __schedparam;
780 +  int __inheritsched;
781 +  int __scope;
782 +  size_t __guardsize;
783 +  int __stackaddr_set;
784 +  void *__stackaddr;
785 +  size_t __stacksize;
786 +} pthread_attr_t;
787 +
788 +
789 +/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER) */
790 +
791 +#ifdef __GLIBC_HAVE_LONG_LONG
792 +__extension__ typedef long long __pthread_cond_align_t;
793 +#else
794 +typedef long __pthread_cond_align_t;
795 +#endif
796 +
797 +typedef struct
798 +{
799 +  struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
800 +  _pthread_descr __c_waiting;        /* Threads waiting on this condition */
801 +  char __padding[48 - sizeof (struct _pthread_fastlock)
802 +                - sizeof (_pthread_descr) - sizeof (__pthread_cond_align_t)];
803 +  __pthread_cond_align_t __align;
804 +} pthread_cond_t;
805 +
806 +
807 +/* Attribute for condition variables.  */
808 +typedef struct
809 +{
810 +  int __dummy;
811 +} pthread_condattr_t;
812 +
813 +/* Keys for thread-specific data */
814 +typedef unsigned int pthread_key_t;
815 +
816 +
817 +/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER).  */
818 +/* (The layout is unnatural to maintain binary compatibility
819 +    with earlier releases of LinuxThreads.) */
820 +typedef struct
821 +{
822 +  int __m_reserved;               /* Reserved for future use */
823 +  int __m_count;                  /* Depth of recursive locking */
824 +  _pthread_descr __m_owner;       /* Owner thread (if recursive or errcheck) */
825 +  int __m_kind;                   /* Mutex kind: fast, recursive or errcheck */
826 +  struct _pthread_fastlock __m_lock; /* Underlying fast lock */
827 +} pthread_mutex_t;
828 +
829 +
830 +/* Attribute for mutex.  */
831 +typedef struct
832 +{
833 +  int __mutexkind;
834 +} pthread_mutexattr_t;
835 +
836 +
837 +/* Once-only execution */
838 +typedef int pthread_once_t;
839 +
840 +
841 +#ifdef __USE_UNIX98
842 +/* Read-write locks.  */
843 +typedef struct _pthread_rwlock_t
844 +{
845 +  struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
846 +  int __rw_readers;                   /* Number of readers */
847 +  _pthread_descr __rw_writer;         /* Identity of writer, or NULL if none */
848 +  _pthread_descr __rw_read_waiting;   /* Threads waiting for reading */
849 +  _pthread_descr __rw_write_waiting;  /* Threads waiting for writing */
850 +  int __rw_kind;                      /* Reader/Writer preference selection */
851 +  int __rw_pshared;                   /* Shared between processes or not */
852 +} pthread_rwlock_t;
853 +
854 +
855 +/* Attribute for read-write locks.  */
856 +typedef struct
857 +{
858 +  int __lockkind;
859 +  int __pshared;
860 +} pthread_rwlockattr_t;
861 +#endif
862 +
863 +#ifdef __USE_XOPEN2K
864 +/* POSIX spinlock data type.  */
865 +typedef __atomic_lock_t pthread_spinlock_t;
866 +
867 +/* POSIX barrier. */
868 +typedef struct {
869 +  struct _pthread_fastlock __ba_lock; /* Lock to guarantee mutual exclusion */
870 +  int __ba_required;                  /* Threads needed for completion */
871 +  int __ba_present;                   /* Threads waiting */
872 +  _pthread_descr __ba_waiting;        /* Queue of waiting threads */
873 +} pthread_barrier_t;
874 +
875 +/* barrier attribute */
876 +typedef struct {
877 +  int __pshared;
878 +} pthread_barrierattr_t;
879 +
880 +#endif
881 +
882 +
883 +/* Thread identifiers */
884 +typedef unsigned long int pthread_t;
885 +
886 +#endif /* bits/pthreadtypes.h */
887 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/malloc-machine.h  1969-12-31 19:00:00.000000000 -0500
888 +++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/malloc-machine.h      2003-10-22 01:07:38.000000000 -0400
889 @@ -0,0 +1,73 @@
890 +/* HP-PARISC macro definitions for mutexes, thread-specific data 
891 +   and parameters for malloc.
892 +   Copyright (C) 2003 Free Software Foundation, Inc.
893 +   This file is part of the GNU C Library.
894 +   Contributed by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003.
895 +   
896 +   The GNU C Library is free software; you can redistribute it and/or
897 +   modify it under the terms of the GNU Lesser General Public
898 +   License as published by the Free Software Foundation; either
899 +   version 2.1 of the License, or (at your option) any later version.
900 +
901 +   The GNU C Library is distributed in the hope that it will be useful,
902 +   but WITHOUT ANY WARRANTY; without even the implied warranty of
903 +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
904 +   Lesser General Public License for more details.
905 +
906 +   You should have received a copy of the GNU Lesser General Public
907 +   License along with the GNU C Library; if not, write to the Free
908 +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
909 +   02111-1307 USA.  */
910 +
911 +#ifndef _MALLOC_MACHINE_H
912 +#define _MALLOC_MACHINE_H
913 +
914 +#undef thread_atfork_static
915 +
916 +#include <atomic.h>
917 +#include <bits/libc-lock.h>
918 +
919 +__libc_lock_define (typedef, mutex_t)
920 +
921 +/* Since our lock structure does not tolerate being initialized to zero, we must
922 +   modify the standard function calls made by malloc */
923 +#  define mutex_init(m)                \
924 +       __libc_maybe_call (__pthread_mutex_init, (m, NULL), \
925 +               (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT),(*(int *)(m))) )
926 +#  define mutex_lock(m)                \
927 +       __libc_maybe_call (__pthread_mutex_lock, (m), \
928 +                       (__load_and_clear(&((m)->__m_lock.__spinlock)), 0))
929 +#  define mutex_trylock(m)     \
930 +       __libc_maybe_call (__pthread_mutex_trylock, (m), \
931 +                       (*(int *)(m) ? 1 : (__load_and_clear(&((m)->__m_lock.__spinlock)), 0)))
932 +#  define mutex_unlock(m)      \
933 +       __libc_maybe_call (__pthread_mutex_unlock, (m), \
934 +                       (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT), (*(int *)(m))) )
935 +       
936 +/* This is defined by newer gcc version unique for each module.  */
937 +extern void *__dso_handle __attribute__ ((__weak__));
938 +
939 +#include <fork.h>
940 +
941 +#ifdef SHARED
942 +# define thread_atfork(prepare, parent, child) \
943 +   __register_atfork (prepare, parent, child, __dso_handle)
944 +#else
945 +# define thread_atfork(prepare, parent, child) \
946 +   __register_atfork (prepare, parent, child,                                \
947 +                     &__dso_handle == NULL ? NULL : __dso_handle)
948 +#endif
949 +
950 +/* thread specific data for glibc */
951 +
952 +#include <bits/libc-tsd.h>
953 +
954 +typedef int tsd_key_t[1];      /* no key data structure, libc magic does it */
955 +__libc_tsd_define (static, MALLOC)     /* declaration/common definition */
956 +#define tsd_key_create(key, destr)     ((void) (key))
957 +#define tsd_setspecific(key, data)     __libc_tsd_set (MALLOC, (data))
958 +#define tsd_getspecific(key, vptr)     ((vptr) = __libc_tsd_get (MALLOC))
959 +
960 +#include <sysdeps/generic/malloc-machine.h>
961 +
962 +#endif /* !defined(_MALLOC_MACHINE_H) */
963 --- glibc-2.3.2-orig-debian/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h   1969-12-31 19:00:00.000000000 -0500
964 +++ glibc-2.3.2/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h       2003-10-22 01:07:38.000000000 -0400
965 @@ -0,0 +1,190 @@
966 +/* Copyright (C) 2003 Free Software Foundation, Inc.
967 +   This file is part of the GNU C Library.
968 +   Contributed by Carlos O'Donell <carlos@baldric.uwo.ca>, 2003.
969 +
970 +   The GNU C Library is free software; you can redistribute it and/or
971 +   modify it under the terms of the GNU Lesser General Public
972 +   License as published by the Free Software Foundation; either
973 +   version 2.1 of the License, or (at your option) any later version.
974 +
975 +   The GNU C Library is distributed in the hope that it will be useful,
976 +   but WITHOUT ANY WARRANTY; without even the implied warranty of
977 +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
978 +   Lesser General Public License for more details.
979 +
980 +   You should have received a copy of the GNU Lesser General Public
981 +   License along with the GNU C Library; if not, write to the Free
982 +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
983 +   02111-1307 USA.  */
984 +
985 +#include <sysdep.h>
986 +#ifndef __ASSEMBLER__
987 +# include <linuxthreads/internals.h>
988 +#endif
989 +
990 +#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
991 +
992 +# ifndef NO_ERROR
993 +#  define NO_ERROR -0x1000
994 +# endif
995 +
996 +# undef PSEUDO
997 +# define PSEUDO(name, syscall_name, args)                              \
998 +  ENTRY (name)                                                         \
999 +    SINGLE_THREAD_P                                    ASM_LINE_SEP    \
1000 +    cmpib,<> 0,%ret0,Lpseudo_cancel                    ASM_LINE_SEP    \
1001 +    nop                                                        ASM_LINE_SEP    \
1002 +    DO_CALL(syscall_name, args)                                ASM_LINE_SEP    \
1003 +    /* DONE! */                                                ASM_LINE_SEP    \
1004 +    bv 0(2)                                            ASM_LINE_SEP    \
1005 +    nop                                                        ASM_LINE_SEP    \
1006 +  Lpseudo_cancel:                                      ASM_LINE_SEP    \
1007 +    /* store return ptr */                             ASM_LINE_SEP    \
1008 +    stw %rp, -20(%sr0,%sp)                             ASM_LINE_SEP    \
1009 +    /* save syscall args */                            ASM_LINE_SEP    \
1010 +    PUSHARGS_##args /* MACRO */                                ASM_LINE_SEP    \
1011 +    STW_PIC                                            ASM_LINE_SEP    \
1012 +    CENABLE /* FUNC CALL */                            ASM_LINE_SEP    \
1013 +    ldo 64(%sp), %sp                                   ASM_LINE_SEP    \
1014 +    ldo -64(%sp), %sp                                  ASM_LINE_SEP    \
1015 +    LDW_PIC                                            ASM_LINE_SEP    \
1016 +    /* restore syscall args */                         ASM_LINE_SEP    \
1017 +    POPARGS_##args                                     ASM_LINE_SEP    \
1018 +    /* save r4 in arg0 stack slot */                   ASM_LINE_SEP    \
1019 +    stw %r4, -36(%sr0,%sp)                             ASM_LINE_SEP    \
1020 +    /* save mask from cenable */                       ASM_LINE_SEP    \
1021 +    copy %ret0, %r4                                    ASM_LINE_SEP    \
1022 +    ble 0x100(%sr2,%r0)                                        ASM_LINE_SEP    \
1023 +    ldi SYS_ify (syscall_name), %r20                   ASM_LINE_SEP    \
1024 +    LDW_PIC                                            ASM_LINE_SEP    \
1025 +    /* pass mask as arg0 to cdisable */                        ASM_LINE_SEP    \
1026 +    copy %r4, %r26                                     ASM_LINE_SEP    \
1027 +    copy %ret0, %r4                                    ASM_LINE_SEP    \
1028 +    CDISABLE                                           ASM_LINE_SEP    \
1029 +    ldo 64(%sp), %sp                                   ASM_LINE_SEP    \
1030 +    ldo -64(%sp), %sp                                  ASM_LINE_SEP    \
1031 +    LDW_PIC                                            ASM_LINE_SEP    \
1032 +    /* compare error */                                        ASM_LINE_SEP    \
1033 +    ldi NO_ERROR,%r1                                   ASM_LINE_SEP    \
1034 +    /* branch if no error */                           ASM_LINE_SEP    \
1035 +    cmpb,>>=,n %r1,%r4,Lpre_end                                ASM_LINE_SEP    \
1036 +    nop                                                        ASM_LINE_SEP    \
1037 +    SYSCALL_ERROR_HANDLER                              ASM_LINE_SEP    \
1038 +    ldo 64(%sp), %sp                                   ASM_LINE_SEP    \
1039 +    ldo -64(%sp), %sp                                  ASM_LINE_SEP    \
1040 +    /* No need to LDW_PIC */                           ASM_LINE_SEP    \
1041 +    /* make syscall res value positive */              ASM_LINE_SEP    \
1042 +    sub %r0, %r4, %r4                                  ASM_LINE_SEP    \
1043 +    /* store into errno location */                    ASM_LINE_SEP    \
1044 +    stw %r4, 0(%sr0,%ret0)                             ASM_LINE_SEP    \
1045 +    /* return -1 */                                    ASM_LINE_SEP    \
1046 +    ldo -1(%r0), %ret0                                 ASM_LINE_SEP    \
1047 +  Lpre_end:                                            ASM_LINE_SEP    \
1048 +    ldw -20(%sr0,%sp), %rp                                     ASM_LINE_SEP    \
1049 +    /* No need to LDW_PIC */                           ASM_LINE_SEP    \
1050 +    ldw -36(%sr0,%sp), %r4                             ASM_LINE_SEP
1051 +
1052 +/* Save arguments into our frame */
1053 +# define PUSHARGS_0    /* nothing to do */
1054 +# define PUSHARGS_1    PUSHARGS_0 stw %r26, -36(%sr0,%sp)      ASM_LINE_SEP
1055 +# define PUSHARGS_2    PUSHARGS_1 stw %r25, -40(%sr0,%sp)      ASM_LINE_SEP
1056 +# define PUSHARGS_3    PUSHARGS_2 stw %r24, -44(%sr0,%sp)      ASM_LINE_SEP
1057 +# define PUSHARGS_4    PUSHARGS_3 stw %r23, -48(%sr0,%sp)      ASM_LINE_SEP
1058 +# define PUSHARGS_5    PUSHARGS_4 /* Args are on the stack... */
1059 +# define PUSHARGS_6    PUSHARGS_5
1060 +
1061 +/* Bring them back from the stack */
1062 +# define POPARGS_0     /* nothing to do */
1063 +# define POPARGS_1     POPARGS_0 ldw -36(%sr0,%sp), %r26       ASM_LINE_SEP
1064 +# define POPARGS_2     POPARGS_1 ldw -40(%sr0,%sp), %r25       ASM_LINE_SEP
1065 +# define POPARGS_3     POPARGS_2 ldw -44(%sr0,%sp), %r24       ASM_LINE_SEP
1066 +# define POPARGS_4     POPARGS_3 ldw -48(%sr0,%sp), %r23       ASM_LINE_SEP
1067 +# define POPARGS_5     POPARGS_4 ldw -52(%sr0,%sp), %r22       ASM_LINE_SEP
1068 +# define POPARGS_6     POPARGS_5 ldw -56(%sr0,%sp), %r21       ASM_LINE_SEP
1069 +
1070 +# ifdef IS_IN_libpthread
1071 +#  ifdef PIC
1072 +#   define CENABLE .import __pthread_enable_asynccancel,code ASM_LINE_SEP \
1073 +                       bl __pthread_enable_asynccancel,%r2 ASM_LINE_SEP
1074 +#   define CDISABLE .import __pthread_disable_asynccancel,code ASM_LINE_SEP \
1075 +                       bl __pthread_disable_asynccancel,%r2 ASM_LINE_SEP 
1076 +#  else
1077 +#   define CENABLE .import __pthread_enable_asynccancel,code ASM_LINE_SEP \
1078 +                       bl __pthread_enable_asynccancel,%r2 ASM_LINE_SEP
1079 +#   define CDISABLE .import __pthread_disable_asynccancel,code ASM_LINE_SEP \
1080 +                       bl __pthread_disable_asynccancel,%r2 ASM_LINE_SEP 
1081 +#  endif
1082 +# elif !defined NOT_IN_libc
1083 +#  ifdef PIC
1084 +#   define CENABLE .import __libc_enable_asynccancel,code ASM_LINE_SEP \
1085 +                       bl __libc_enable_asynccancel,%r2 ASM_LINE_SEP 
1086 +#   define CDISABLE    .import __libc_disable_asynccancel,code ASM_LINE_SEP \
1087 +                       bl __libc_disable_asynccancel,%r2 ASM_LINE_SEP 
1088 +#  else
1089 +#   define CENABLE .import __libc_enable_asynccancel,code ASM_LINE_SEP \
1090 +                       bl __libc_enable_asynccancel,%r2 ASM_LINE_SEP 
1091 +#   define CDISABLE    .import __libc_disable_asynccancel,code ASM_LINE_SEP \
1092 +                       bl __libc_disable_asynccancel,%r2 ASM_LINE_SEP 
1093 +#  endif
1094 +# else
1095 +#  ifdef PIC
1096 +#   define CENABLE .import __librt_enable_asynccancel,code ASM_LINE_SEP \
1097 +                       bl __librt_enable_asynccancel,%r2 ASM_LINE_SEP 
1098 +#   define CDISABLE .import __librt_disable_asynccancel,code ASM_LINE_SEP \
1099 +                       bl __librt_disable_asynccancel,%r2 ASM_LINE_SEP 
1100 +#  else
1101 +#   define CENABLE .import __librt_enable_asynccancel,code ASM_LINE_SEP \
1102 +                       bl __librt_enable_asynccancel,%r2 ASM_LINE_SEP 
1103 +#   define CDISABLE .import __librt_disable_asynccancel,code ASM_LINE_SEP \
1104 +                       bl __librt_disable_asynccancel,%r2 ASM_LINE_SEP 
1105 +#  endif
1106 +# endif        
1107 +
1108 +/* p_header.multiple_threads is +12 from the pthread_descr struct start,
1109 +   We could have called __get_cr27() but we really want less overhead */
1110 +# define MULTIPLE_THREADS_OFFSET 0xC
1111 +
1112 +/* cr27 has been initialized to 0x0 by kernel */
1113 +# define NO_THREAD_CR27 0x0
1114 +                       
1115 +# ifdef IS_IN_libpthread
1116 +#  define __local_multiple_threads __pthread_multiple_threads
1117 +# elif !defined NOT_IN_libc
1118 +#  define __local_multiple_threads __libc_multiple_threads
1119 +# else
1120 +#  define __local_multiple_threads __librt_multiple_threads
1121 +# endif
1122 +
1123 +# ifndef __ASSEMBLER__
1124 + extern int __local_multiple_threads attribute_hidden;
1125 +#  define SINGLE_THREAD_P __builtin_expect (__local_multiple_threads == 0, 1)
1126 +# else
1127 +/* This ALT version requires newer kernel support */
1128 +#  define SINGLE_THREAD_P_MFCTL                                                \
1129 +       mfctl %cr27, %ret0                                      ASM_LINE_SEP    \
1130 +       cmpib,= NO_THREAD_CR27,%ret0,Lstp                       ASM_LINE_SEP    \
1131 +       nop                                                     ASM_LINE_SEP    \
1132 +       ldw MULTIPLE_THREADS_OFFSET(%sr0,%ret0),%ret0           ASM_LINE_SEP    \
1133 + Lstp:                                                         ASM_LINE_SEP    
1134 +#  ifdef PIC
1135 +/* Slower version uses GOT to get value of __local_multiple_threads */
1136 +#   define SINGLE_THREAD_P                                                     \
1137 +       addil LT%__local_multiple_threads, %r19                 ASM_LINE_SEP    \
1138 +       ldw RT%__local_multiple_threads(%sr0,%r1), %ret0        ASM_LINE_SEP    \
1139 +       ldw 0(%sr0,%ret0), %ret0                                ASM_LINE_SEP
1140 +#  else
1141 +  /* Slow non-pic version using DP */
1142 +#   define SINGLE_THREAD_P                                                             \
1143 +       addil LR%__local_multiple_threads-$global$,%r27                 ASM_LINE_SEP    \
1144 +       ldw RR%__local_multiple_threads-$global$(%sr0,%r1),%ret0        ASM_LINE_SEP
1145 +#  endif       
1146 +# endif
1147 +#elif !defined __ASSEMBLER__
1148 +
1149 +/* This code should never be used but we define it anyhow.  */
1150 +# define SINGLE_THREAD_P (1)
1151 +
1152 +#endif
1153 +/* !defined NOT_IN_libc || defined IS_IN_libpthread */
1154 +
1155 +
1156 --- glibc-2.3.2-orig-debian/localedata/gen-locale.sh    2003-10-22 01:03:54.000000000 -0400
1157 +++ glibc-2.3.2/localedata/gen-locale.sh        2003-10-22 01:07:38.000000000 -0400
1158 @@ -47,6 +47,7 @@
1159  locale=`echo $locfile|sed 's|\([^.]*\)[.].*/LC_CTYPE|\1|'`
1160  charmap=`echo $locfile|sed 's|[^.]*[.]\(.*\)/LC_CTYPE|\1|'`
1161  
1162 +echo "Running \"$0 $common_objpfx $localedef $locfile\""
1163  echo "Generating locale $locale.$charmap: this might take a while..."
1164  generate_locale `echo $charmap | sed -e s/SJIS/SHIFT_JIS/` $locale \
1165                 $locale.$charmap
1166 --- glibc-2.3.2-orig-debian/malloc/thread-m.h   2003-10-22 01:06:10.000000000 -0400
1167 +++ glibc-2.3.2/malloc/thread-m.h       2003-10-22 01:07:38.000000000 -0400
1168 @@ -59,6 +59,28 @@
1169  #define mutex_unlock(m)                \
1170    __libc_maybe_call2 (pthread_mutex_unlock, (m), (*(int *)(m) = 0))
1171  
1172 +# if(defined __hppa__)
1173 +/* Since our lock structure does not tolerate being initialized to zero, we must
1174 +   modify the standard function calls made by malloc */
1175 +#  undef mutex_init
1176 +#  undef mutex_lock
1177 +#  undef mutex_trylock
1178 +#  undef mutex_unlock
1179 +#  define mutex_init(m)                \
1180 +       __libc_maybe_call (__pthread_mutex_init, (m, NULL), \
1181 +                       (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT),(*(int *)(m))) )
1182 +#  define mutex_lock(m)                \
1183 +       __libc_maybe_call (__pthread_mutex_lock, (m), \
1184 +                       (__load_and_clear(&((m)->__m_lock.__spinlock)), 0))
1185 +#  define mutex_trylock(m)     \
1186 +       __libc_maybe_call (__pthread_mutex_trylock, (m), \
1187 +                       (*(int *)(m) ? 1 : (__load_and_clear(&((m)->__m_lock.__spinlock)), 0)))
1188 +#  define mutex_unlock(m)      \
1189 +       __libc_maybe_call (__pthread_mutex_unlock, (m), \
1190 +                       (((m)->__m_lock.__spinlock = __LT_SPINLOCK_INIT), (*(int *)(m))) )
1191 +# endif 
1192 +/* if(defined __hppa__) */
1193 +       
1194  #else
1195  
1196  #define mutex_init(m)          \
1197 --- glibc-2.3.2-orig-debian/sysdeps/generic/framestate.c        2003-10-22 01:03:47.000000000 -0400
1198 +++ glibc-2.3.2/sysdeps/generic/framestate.c    2003-10-22 01:07:38.000000000 -0400
1199 @@ -41,7 +41,11 @@
1200        if (handle == NULL
1201           || (frame_state_for
1202               = (framesf) __libc_dlsym (handle, "__frame_state_for")) == NULL)
1203 +#ifndef __USING_SJLJ_EXCEPTIONS__
1204         frame_state_for = fallback_frame_state_for;
1205 +#else
1206 +       frame_state_for = abort;
1207 +#endif
1208      }
1209  
1210    return frame_state_for (pc, frame_state);
1211 --- glibc-2.3.2-orig-debian/sysdeps/generic/unwind-dw2.c        2003-10-22 01:06:11.000000000 -0400
1212 +++ glibc-2.3.2/sysdeps/generic/unwind-dw2.c    2003-10-22 01:08:07.000000000 -0400
1213 @@ -39,7 +39,6 @@
1214  #endif
1215  
1216  
1217 -#ifndef __USING_SJLJ_EXCEPTIONS__
1218  
1219  #ifndef STACK_GROWS_DOWNWARD
1220  #define STACK_GROWS_DOWNWARD 0
1221 @@ -1287,4 +1286,3 @@
1222  #include "unwind.inc"
1223  
1224  #endif /* _LIBC */
1225 -#endif /* !USING_SJLJ_EXCEPTIONS */
1226 --- glibc-2.3.2-orig-debian/sysdeps/hppa/Dist   2003-10-22 01:03:47.000000000 -0400
1227 +++ glibc-2.3.2/sysdeps/hppa/Dist       2003-10-22 01:07:38.000000000 -0400
1228 @@ -1,2 +1,3 @@
1229 +libgcc-compat.c
1230  dl-symaddr.c
1231  dl-fptr.c
1232 --- glibc-2.3.2-orig-debian/sysdeps/hppa/Makefile       2003-10-22 01:03:47.000000000 -0400
1233 +++ glibc-2.3.2/sysdeps/hppa/Makefile   2003-10-22 01:07:38.000000000 -0400
1234 @@ -22,12 +22,19 @@
1235  # CFLAGS-.os += -ffunction-sections
1236  LDFLAGS-c_pic.os += -Wl,--unique=.text*
1237  
1238 -ifeq ($(subdir),malloc)
1239 -CFLAGS-malloc.c += -DMALLOC_ALIGNMENT=16
1240 -endif
1241 -
1242  ifeq ($(subdir),elf)
1243  CFLAGS-rtld.c += -mdisable-fpregs
1244  dl-routines += dl-symaddr dl-fptr
1245  rtld-routines += dl-symaddr dl-fptr
1246  endif
1247 +
1248 +ifeq ($(subdir),csu)
1249 +ifeq (yes,$(build-shared))
1250 +# Compatibility
1251 +ifeq (yes,$(have-protected))
1252 +CPPFLAGS-libgcc-compat.c = -DHAVE_DOT_HIDDEN
1253 +endif
1254 +sysdep_routines += libgcc-compat
1255 +shared-only-routines += libgcc-compat
1256 +endif
1257 +endif
1258 --- glibc-2.3.2-orig-debian/sysdeps/hppa/Versions       2003-10-22 01:03:47.000000000 -0400
1259 +++ glibc-2.3.2/sysdeps/hppa/Versions   2003-10-22 01:07:38.000000000 -0400
1260 @@ -5,3 +5,8 @@
1261      _dl_function_address;
1262    }
1263  }
1264 +libc {
1265 +  GLIBC_2.2 {
1266 +    __clz_tab;
1267 +  }
1268 +}
1269 --- glibc-2.3.2-orig-debian/sysdeps/hppa/atomicity.h    1969-12-31 19:00:00.000000000 -0500
1270 +++ glibc-2.3.2/sysdeps/hppa/atomicity.h        2003-10-22 01:07:38.000000000 -0400
1271 @@ -0,0 +1,55 @@
1272 +/* Low-level functions for atomic operations.  HP-PARISC version.
1273 +   Copyright (C) 1997,2001 Free Software Foundation, Inc.
1274 +   This file is part of the GNU C Library.
1275 +
1276 +   The GNU C Library is free software; you can redistribute it and/or
1277 +   modify it under the terms of the GNU Lesser General Public
1278 +   License as published by the Free Software Foundation; either
1279 +   version 2.1 of the License, or (at your option) any later version.
1280 +
1281 +   The GNU C Library is distributed in the hope that it will be useful,
1282 +   but WITHOUT ANY WARRANTY; without even the implied warranty of
1283 +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
1284 +   Lesser General Public License for more details.
1285 +
1286 +   You should have received a copy of the GNU Lesser General Public
1287 +   License along with the GNU C Library; if not, write to the Free
1288 +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
1289 +   02111-1307 USA.  */
1290 +
1291 +#ifndef _ATOMICITY_H
1292 +#define _ATOMICITY_H   1
1293 +
1294 +#include <inttypes.h>
1295 +
1296 +#warning stub atomicity functions are not atomic
1297 +#warning CAO This will get implemented soon
1298 +
1299 +static inline int
1300 +__attribute__ ((unused))
1301 +exchange_and_add (volatile uint32_t *mem, int val)
1302 +{
1303 +  int result = *mem;
1304 +  *mem += val;
1305 +  return result;
1306 +}
1307 +
1308 +static inline void
1309 +__attribute__ ((unused))
1310 +atomic_add (volatile uint32_t *mem, int val)
1311 +{
1312 +  *mem += val;
1313 +}
1314 +
1315 +static inline int
1316 +__attribute__ ((unused))
1317 +compare_and_swap (volatile long int *p, long int oldval, long int newval)
1318 +{
1319 +  if (*p != oldval)
1320 +    return 0;
1321 +
1322 +  *p = newval;
1323 +  return 1;
1324 +}
1325 +
1326 +#endif /* atomicity.h */
1327 --- glibc-2.3.2-orig-debian/sysdeps/hppa/dl-fptr.c      2003-10-22 01:03:47.000000000 -0400
1328 +++ glibc-2.3.2/sysdeps/hppa/dl-fptr.c  2003-10-22 01:07:38.000000000 -0400
1329 @@ -30,7 +30,7 @@
1330  # include <pt-machine.h>
1331  
1332  /* Remember, we use 0 to mean that a lock is taken on PA-RISC. */
1333 -static int __hppa_fptr_lock = 1;
1334 +static __atomic_lock_t __hppa_fptr_lock = __LT_SPINLOCK_ALT_INIT;
1335  #endif
1336  
1337  /* Because ld.so is now versioned, these functions can be in their own
1338 @@ -127,7 +127,7 @@
1339  #ifdef _LIBC_REENTRANT
1340    /* Release the lock.  Again, remember, zero means the lock is taken!  */
1341    if (mem == NULL)
1342 -    __hppa_fptr_lock = 1;
1343 +    __hppa_fptr_lock = __LT_SPINLOCK_INIT;
1344  #endif
1345  
1346    /* Set bit 30 to indicate to $$dyncall that this is a PLABEL. */
1347 @@ -180,7 +180,7 @@
1348  
1349  #ifdef _LIBC_REENTRANT
1350    /* Release the lock. */
1351 -  __hppa_fptr_lock = 1;
1352 +  __hppa_fptr_lock = __LT_SPINLOCK_INIT;
1353  #endif
1354  }
1355  
1356 @@ -190,6 +190,8 @@
1357    Elf32_Addr addr = (Elf32_Addr) address;
1358    struct hppa_fptr *f;
1359  
1360 +  address = (void *)((unsigned long)address &~ 3); /* Clear the bottom two bits.  See make_fptr. */
1361 +  
1362  #ifdef _LIBC_REENTRANT
1363    /* Make sure we are alone.  */
1364    while (testandset (&__hppa_fptr_lock));
1365 @@ -204,7 +206,7 @@
1366  
1367  #ifdef _LIBC_REENTRANT
1368    /* Release the lock.   */
1369 -  __hppa_fptr_lock = 1;
1370 +  __hppa_fptr_lock = __LT_SPINLOCK_INIT;
1371  #endif
1372  
1373    return addr;
1374 --- glibc-2.3.2-orig-debian/sysdeps/hppa/dl-machine.h   2003-10-22 01:06:11.000000000 -0400
1375 +++ glibc-2.3.2/sysdeps/hppa/dl-machine.h       2003-10-22 01:10:26.000000000 -0400
1376 @@ -1,5 +1,5 @@
1377  /* Machine-dependent ELF dynamic relocation inline functions.  PA-RISC version.
1378 -   Copyright (C) 1995-1997,1999,2000,2001,2002, 2003
1379 +   Copyright (C) 1995-1997,1999-2003
1380         Free Software Foundation, Inc.
1381     Contributed by David Huggins-Daines <dhd@debian.org>
1382     This file is part of the GNU C Library.
1383 @@ -29,8 +29,15 @@
1384  #include <link.h>
1385  #include <assert.h>
1386  
1387 +# define VALID_ELF_OSABI(osabi)                ((osabi == ELFOSABI_SYSV) || (osabi == ELFOSABI_LINUX))
1388 +# define VALID_ELF_ABIVERSION(ver)     (ver == 0)
1389 +# define VALID_ELF_HEADER(hdr,exp,size) \
1390 +  memcmp (hdr,exp,size-2) == 0 \
1391 +  && VALID_ELF_OSABI (hdr[EI_OSABI]) \
1392 +  && VALID_ELF_ABIVERSION (hdr[EI_ABIVERSION])
1393 +
1394  /* These must match the definition of the stub in bfd/elf32-hppa.c. */
1395 -#define SIZEOF_PLT_STUB (4*4)
1396 +#define SIZEOF_PLT_STUB (7*4)
1397  #define GOT_FROM_PLT_STUB (4*4)
1398  
1399  /* A PLABEL is a function descriptor.  Properly they consist of just
1400 @@ -67,45 +74,41 @@
1401    return ehdr->e_machine == EM_PARISC;
1402  }
1403  
1404 -
1405  /* Return the link-time address of _DYNAMIC.  */
1406  static inline Elf32_Addr
1407 +elf_machine_dynamic (void) __attribute__ ((const));
1408 +
1409 +static inline Elf32_Addr
1410  elf_machine_dynamic (void)
1411  {
1412    Elf32_Addr dynamic;
1413  
1414 -#if 0
1415 -  /* Use this method if GOT address not yet set up.  */
1416 -  asm (
1417 -"      b,l     1f,%0\n"
1418 +  asm ("b,l    1f,%0\n"
1419  "      depi    0,31,2,%0\n"
1420  "1:    addil   L'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 8),%0\n"
1421  "      ldw     R'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 12)(%%r1),%0\n"
1422 -      : "=r" (dynamic) : : "r1");
1423 -#else
1424 -  /* This works because we already have our GOT address available.  */
1425 -  dynamic = (Elf32_Addr) &_DYNAMIC;
1426 -#endif
1427 +       : "=r" (dynamic) : : "r1");
1428  
1429    return dynamic;
1430  }
1431  
1432  /* Return the run-time load address of the shared object.  */
1433  static inline Elf32_Addr
1434 +elf_machine_load_address (void) __attribute__ ((const));
1435 +
1436 +static inline Elf32_Addr
1437  elf_machine_load_address (void)
1438  {
1439 -  Elf32_Addr dynamic, dynamic_linkaddress;
1440 +  Elf32_Addr dynamic;
1441  
1442    asm (
1443  "      b,l     1f,%0\n"
1444  "      depi    0,31,2,%0\n"
1445  "1:    addil   L'_DYNAMIC - ($PIC_pcrel$0 - 8),%0\n"
1446 -"      ldo     R'_DYNAMIC - ($PIC_pcrel$0 - 12)(%%r1),%1\n"
1447 -"      addil   L'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 16),%0\n"
1448 -"      ldw     R'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 20)(%%r1),%0\n"
1449 -   : "=r" (dynamic_linkaddress), "=r" (dynamic) : : "r1");
1450 +"      ldo     R'_DYNAMIC - ($PIC_pcrel$0 - 12)(%%r1),%0\n"
1451 +   : "=r" (dynamic) : : "r1");
1452  
1453 -  return dynamic - dynamic_linkaddress;
1454 +  return dynamic - elf_machine_dynamic ();
1455  }
1456  
1457  /* Fixup a PLT entry to bounce directly to the function at VALUE.  */
1458 @@ -168,41 +171,39 @@
1459               fptr = (struct hppa_fptr *) (reloc->r_offset + l_addr);
1460               if (r_sym != 0)
1461                 {
1462 -                 /* Relocate the pointer to the stub.  */
1463 -                 fptr->func += l_addr;
1464 -                 /* Instead of the LTP value, we put the reloc offset
1465 -                    here.  The trampoline code will load the proper
1466 -                    LTP and pass the reloc offset to the fixup
1467 -                    function.  */
1468 -                 fptr->gp = iplt - jmprel;
1469                   if (!got)
1470                     {
1471                       static union {
1472                         unsigned char c[8];
1473                         Elf32_Addr i[2];
1474                       } sig = {{0x00,0xc0,0xff,0xee, 0xde,0xad,0xbe,0xef}};
1475 +                     const Elf32_Rela *last_rel;
1476 +
1477 +                     last_rel = (const Elf32_Rela *) end_jmprel - 1;
1478 +
1479 +                     /* The stub is immediately after the last .plt
1480 +                        entry.  Rely on .plt relocs being ordered.  */
1481 +                     if (last_rel->r_offset == 0)
1482 +                       return 0;
1483  
1484                       /* Find our .got section.  It's right after the
1485                          stub.  */
1486 -                     got = (Elf32_Addr *) (fptr->func + GOT_FROM_PLT_STUB);
1487 +                     got = (Elf32_Addr *) (last_rel->r_offset + l_addr
1488 +                                           + 8 + SIZEOF_PLT_STUB);
1489  
1490 -                     /* Sanity check to see if the address we are
1491 -                         going to check below is within a reasonable
1492 -                         approximation of the bounds of the PLT (or,
1493 -                         at least, is at an address that won't fault
1494 -                         on read).  Then check for the magic signature
1495 -                         above. */
1496 -                     if (fptr->func < (Elf32_Addr) fptr + sizeof(*fptr))
1497 -                         return 0;
1498 -                     if (fptr->func >
1499 -                         ((Elf32_Addr) fptr
1500 -                          + SIZEOF_PLT_STUB
1501 -                          + ((l->l_info[DT_PLTRELSZ]->d_un.d_val / sizeof (Elf32_Rela))
1502 -                             * 8)))
1503 -                       return 0;
1504 +                     /* Check the magic signature.  */
1505                       if (got[-2] != sig.i[0] || got[-1] != sig.i[1])
1506                         return 0; /* No lazy linking for you! */
1507                     }
1508 +
1509 +                 /* Relocate the pointer to the stub.  */
1510 +                 fptr->func = (Elf32_Addr) got - GOT_FROM_PLT_STUB;
1511 +
1512 +                 /* Instead of the LTP value, we put the reloc offset
1513 +                    here.  The trampoline code will load the proper
1514 +                    LTP and pass the reloc offset to the fixup
1515 +                    function.  */
1516 +                 fptr->gp = iplt - jmprel;
1517                 }
1518               else
1519                 {
1520 @@ -272,22 +273,24 @@
1521  "      stw     %r25,-40(%sp)\n" /* argc */                             \
1522  "      stw     %r24,-44(%sp)\n" /* argv */                             \
1523                                                                         \
1524 -       /* We need the LTP, and we need it now. */                      \
1525 -       /* $PIC_pcrel$0 points 8 bytes past the current instruction,    \
1526 -          just like a branch reloc.  This sequence gets us the runtime \
1527 -          address of _DYNAMIC. */                                      \
1528 +       /* We need the LTP, and we need it now.                         \
1529 +          $PIC_pcrel$0 points 8 bytes past the current instruction,    \
1530 +          just like a branch reloc.  This sequence gets us the         \
1531 +          runtime address of _DYNAMIC. */                              \
1532  "      bl      0f,%r19\n"                                              \
1533  "      depi    0,31,2,%r19\n"  /* clear priviledge bits */             \
1534  "0:    addil   L'_DYNAMIC - ($PIC_pcrel$0 - 8),%r19\n"                 \
1535  "      ldo     R'_DYNAMIC - ($PIC_pcrel$0 - 12)(%r1),%r26\n"           \
1536                                                                         \
1537 -       /* Also get the link time address from the first entry of the GOT.  */ \
1538 +       /* The link time address is stored in the first entry of the    \
1539 +          GOT.  */                                                     \
1540  "      addil   L'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 16),%r19\n"   \
1541  "      ldw     R'_GLOBAL_OFFSET_TABLE_ - ($PIC_pcrel$0 - 20)(%r1),%r20\n" \
1542                                                                         \
1543  "      sub     %r26,%r20,%r20\n"       /* Calculate load offset */     \
1544                                                                         \
1545 -       /* Rummage through the dynamic entries, looking for DT_PLTGOT.  */ \
1546 +       /* Rummage through the dynamic entries, looking for             \
1547 +          DT_PLTGOT.  */                                               \
1548  "      ldw,ma  8(%r26),%r19\n"                                         \
1549  "1:    cmpib,=,n 3,%r19,2f\n"  /* tag == DT_PLTGOT? */                 \
1550  "      cmpib,<>,n 0,%r19,1b\n"                                         \
1551 @@ -307,8 +310,8 @@
1552            |         32 bytes of magic       |                          \
1553            |---------------------------------|                          \
1554            | 32 bytes argument/sp save area  |                          \
1555 -          |---------------------------------|  ((current->mm->env_end) + 63 & ~63) \
1556 -          |         N bytes of slack        |                          \
1557 +          |---------------------------------|  ((current->mm->env_end) \
1558 +          |         N bytes of slack        |   + 63 & ~63)            \
1559            |---------------------------------|                          \
1560            |      envvar and arg strings     |                          \
1561            |---------------------------------|                          \
1562 @@ -376,7 +379,7 @@
1563  "      bl      _dl_init_internal,%r2\n"                                \
1564  "      ldo     4(%r23),%r23\n" /* delay slot */                        \
1565                                                                         \
1566 -       /* Reload argc, argv  to the registers start.S expects them in (feh) */ \
1567 +       /* Reload argc, argv to the registers start.S expects.  */      \
1568  "      ldw     -40(%sp),%r25\n"                                        \
1569  "      ldw     -44(%sp),%r24\n"                                        \
1570                                                                         \
1571 @@ -388,8 +391,8 @@
1572  "      .word   0xdeadbeef\n"                                           \
1573  "      .previous\n"                                                    \
1574                                                                         \
1575 -       /* %r3 contains a function pointer, we need to mask out the lower \
1576 -        * bits and load the gp and jump address. */                    \
1577 +       /* %r3 contains a function pointer, we need to mask out the     \
1578 +          lower bits and load the gp and jump address. */              \
1579  "      depi    0,31,2,%r3\n"                                           \
1580  "      ldw     0(%r3),%r2\n"                                           \
1581  "      addil   LT'__dl_fini_plabel,%r19\n"                             \
1582 @@ -407,43 +410,41 @@
1583     Enter with r19 = reloc offset, r20 = got-8, r21 = fixup ltp.  */
1584  #define TRAMPOLINE_TEMPLATE(tramp_name, fixup_name) \
1585    extern void tramp_name (void);                   \
1586 -  asm ( "\
1587 -       /* Trampoline for " #tramp_name " */                                \n\
1588 -       .globl " #tramp_name "                                              \n\
1589 -       .type " #tramp_name ",@function                                     \n\
1590 -" #tramp_name ":                                                           \n\
1591 -       /* Save return pointer */                                           \n\
1592 -       stw     %r2,-20(%sp)                                                \n\
1593 -       /* Save argument registers in the call stack frame. */              \n\
1594 -       stw     %r26,-36(%sp)                                               \n\
1595 -       stw     %r25,-40(%sp)                                               \n\
1596 -       stw     %r24,-44(%sp)                                               \n\
1597 -       stw     %r23,-48(%sp)                                               \n\
1598 -       /* Build a call frame. */                                           \n\
1599 -       stwm    %sp,64(%sp)                                                 \n\
1600 -                                                                           \n\
1601 -       /* Set up args to fixup func.  */                                   \n\
1602 -       ldw     8+4(%r20),%r26  /* got[1] == struct link_map *  */          \n\
1603 -       copy    %r19,%r25       /* reloc offset  */                         \n\
1604 -                                                                           \n\
1605 -       /* Call the real address resolver. */                               \n\
1606 -       bl      " #fixup_name ",%r2                                         \n\
1607 -       copy    %r21,%r19       /* delay slot, set fixup func ltp */        \n\
1608 -                                                                           \n\
1609 -       ldwm    -64(%sp),%sp                                                \n\
1610 -       /* Arguments. */                                                    \n\
1611 -       ldw     -36(%sp),%r26                                               \n\
1612 -       ldw     -40(%sp),%r25                                               \n\
1613 -       ldw     -44(%sp),%r24                                               \n\
1614 -       ldw     -48(%sp),%r23                                               \n\
1615 -       /* Return pointer. */                                               \n\
1616 -       ldw     -20(%sp),%r2                                                \n\
1617 -       /* Call the real function. */                                       \n\
1618 -       ldw     0(%r28),%r22                                                \n\
1619 -       bv      %r0(%r22)                                                   \n\
1620 -       ldw     4(%r28),%r19                                                \n\
1621 -");
1622 -
1623 +  asm (".globl " #tramp_name "\n"                                      \
1624 + "     .type " #tramp_name ",@function\n"                              \
1625 +  #tramp_name ":\n"                                                    \
1626 +       /* Save return pointer */                                       \
1627 + "     stw     %r2,-20(%sp)\n"                                         \
1628 +       /* Save argument registers in the call stack frame. */          \
1629 + "     stw     %r26,-36(%sp)\n"                                        \
1630 + "     stw     %r25,-40(%sp)\n"                                        \
1631 + "     stw     %r24,-44(%sp)\n"                                        \
1632 + "     stw     %r23,-48(%sp)\n"                                        \
1633 +       /* Build a call frame, and save structure pointer. */           \
1634 + "     stwm    %r28,64(%sp)\n"                                         \
1635 +                                                                       \
1636 +       /* Set up args to fixup func.  */                               \
1637 + "     ldw     8+4(%r20),%r26\n" /* got[1] == struct link_map *  */    \
1638 + "     copy    %r19,%r25\n"      /* reloc offset  */                   \
1639 +                                                                       \
1640 +       /* Call the real address resolver. */                           \
1641 + "     bl      " #fixup_name ",%r2\n"                                  \
1642 + "     copy    %r21,%r19\n"      /* delay slot, set fixup func ltp */  \
1643 +                                                                       \
1644 + "     ldw     0(%r28),%r22\n"   /* load up the returned func ptr */   \
1645 + "     ldw     4(%r28),%r19\n"                                         \
1646 + "     ldwm    -64(%sp),%r28\n"                                        \
1647 +       /* Arguments. */                                                \
1648 + "     ldw     -36(%sp),%r26\n"                                        \
1649 + "     ldw     -40(%sp),%r25\n"                                        \
1650 + "     ldw     -44(%sp),%r24\n"                                        \
1651 + "     ldw     -48(%sp),%r23\n"                                        \
1652 +       /* Call the real function. */                                   \
1653 + "     bv      %r0(%r22)\n"                                            \
1654 +       /* Return pointer. */                                           \
1655 + "     ldw     -20(%sp),%r2\n"                                         \
1656 +        );
1657 +  
1658  #ifndef PROF
1659  #define ELF_MACHINE_RUNTIME_TRAMPOLINE                 \
1660    TRAMPOLINE_TEMPLATE (_dl_runtime_resolve, fixup);    \
1661 @@ -470,16 +471,32 @@
1662  /* We only use RELA. */
1663  #define ELF_MACHINE_NO_REL 1
1664  
1665 +/* Tell dynamic-link that PA needs the extra link_map structure */
1666 +#define ELF_MACHINE_REL_RELATIVE_NEEDSLINKMAP 1
1667 +#define ELF_MACHINE_RELA_RELATIVE_NEEDSLINKMAP 1
1668 +       
1669  /* Return the address of the entry point. */
1670  #define ELF_MACHINE_START_ADDRESS(map, start) \
1671    DL_FUNCTION_ADDRESS (map, start)
1672  
1673 +/* We define an initialization functions.  This is called very early in
1674 + *    _dl_sysdep_start.  */
1675 +#define DL_PLATFORM_INIT dl_platform_init ()
1676 +
1677 +static inline void __attribute__ ((unused))
1678 +dl_platform_init (void)
1679 +{
1680 +       if (GL(dl_platform) != NULL && *GL(dl_platform) == '\0')
1681 +       /* Avoid an empty string which would disturb us.  */
1682 +               GL(dl_platform) = NULL;
1683 +}
1684 +       
1685  #endif /* !dl_machine_h */
1686  
1687  /* These are only actually used where RESOLVE_MAP is defined, anyway. */
1688  #ifdef RESOLVE_MAP
1689  
1690 -static inline void
1691 +auto void __attribute__((always_inline))
1692  elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
1693                   const Elf32_Sym *sym, const struct r_found_version *version,
1694                   void *const reloc_addr_arg)
1695 @@ -569,15 +586,15 @@
1696            probably haven't relocated the necessary values by this
1697            point so we have to find them ourselves. */
1698  
1699 -       asm ("bl        0f,%0                                               \n\
1700 -             depi      0,31,2,%0                                           \n\
1701 -0:           addil     L'__boot_ldso_fptr - ($PIC_pcrel$0 - 8),%0          \n\
1702 -             ldo       R'__boot_ldso_fptr - ($PIC_pcrel$0 - 12)(%%r1),%1   \n\
1703 -             addil     L'__fptr_root - ($PIC_pcrel$0 - 16),%0              \n\
1704 -             ldo       R'__fptr_root - ($PIC_pcrel$0 - 20)(%%r1),%2        \n\
1705 -             addil     L'__fptr_count - ($PIC_pcrel$0 - 24),%0             \n\
1706 -             ldo       R'__fptr_count - ($PIC_pcrel$0 - 28)(%%r1),%3"
1707 -            :
1708 +       asm ("bl        0f,%0\n\t"
1709 +            "depi      0,31,2,%0\n\t"
1710 +            "0:\taddil L'__boot_ldso_fptr - ($PIC_pcrel$0 - 8),%0\n\t"
1711 +            "ldo       R'__boot_ldso_fptr - ($PIC_pcrel$0 - 12)(%%r1),%1\n\t"
1712 +            "addil     L'__fptr_root - ($PIC_pcrel$0 - 16),%0\n\t"
1713 +            "ldo       R'__fptr_root - ($PIC_pcrel$0 - 20)(%%r1),%2\n\t"
1714 +            "addil     L'__fptr_count - ($PIC_pcrel$0 - 24),%0\n\t"
1715 +            "ldo       R'__fptr_count - ($PIC_pcrel$0 - 28)(%%r1),%3"
1716 +            :
1717              "=r" (dot),
1718              "=r" (p_boot_ldso_fptr),
1719              "=r" (p_fptr_root),
1720 @@ -636,7 +653,7 @@
1721  
1722  /* hppa doesn't have an R_PARISC_RELATIVE reloc, but uses relocs with
1723     ELF32_R_SYM (info) == 0 for a similar purpose.  */
1724 -static inline void
1725 +auto void __attribute__((always_inline))
1726  elf_machine_rela_relative (struct link_map *map, Elf32_Addr l_addr,
1727                            const Elf32_Rela *reloc,
1728                            void *const reloc_addr_arg)
1729 @@ -682,7 +699,7 @@
1730    *reloc_addr = value;
1731  }
1732  
1733 -static inline void
1734 +auto void __attribute__((always_inline))
1735  elf_machine_lazy_rel (struct link_map *map,
1736                       Elf32_Addr l_addr, const Elf32_Rela *reloc)
1737  {
1738 --- glibc-2.3.2-orig-debian/sysdeps/hppa/elf/entry.h    1969-12-31 19:00:00.000000000 -0500
1739 +++ glibc-2.3.2/sysdeps/hppa/elf/entry.h        2003-10-22 01:07:38.000000000 -0400
1740 @@ -0,0 +1,10 @@
1741 +#ifndef __ASSEMBLY__
1742 +extern void _start (void);
1743 +#endif
1744 +
1745 +/* The function's entry point is stored in the first word of the
1746 +   function descriptor (plabel) of _start().  */
1747 +#define ENTRY_POINT ({long int *tmp = (long int *)((long)_start & ~2); tmp[0];})
1748 +
1749 +/* We have to provide a special declaration.  */
1750 +#define ENTRY_POINT_DECL(class) class void _start (void);
1751 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fclrexcpt.c        2003-10-22 01:03:47.000000000 -0400
1752 +++ glibc-2.3.2/sysdeps/hppa/fpu/fclrexcpt.c    2003-10-22 01:07:38.000000000 -0400
1753 @@ -29,7 +29,7 @@
1754    __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
1755  
1756    /* Clear all the relevant bits. */
1757 -  sw[0] &= ~(excepts & FE_ALL_EXCEPT) << 27;
1758 +  sw[0] &= ~((excepts & FE_ALL_EXCEPT) << 27);
1759    __asm__ ("fldd 0(%0),%%fr0" : : "r" (sw));
1760  
1761    /* Success.  */
1762 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fegetenv.c 2003-10-22 01:03:47.000000000 -0400
1763 +++ glibc-2.3.2/sysdeps/hppa/fpu/fegetenv.c     2003-10-22 01:07:38.000000000 -0400
1764 @@ -23,11 +23,12 @@
1765  int
1766  fegetenv (fenv_t *envp)
1767  {
1768 -  __asm__ (
1769 -          "fstd %%fr0,0(%2)\n"
1770 -          "fstd,ma %%fr1,8(%2)\n"
1771 -          "fstd,ma %%fr2,8(%2)\n"
1772 -          "fstd %%fr3,0(%2)\n"
1773 -          : "=m" (*envp), "=r" (envp) : "1" (envp));
1774 +  fenv_t *temp = envp;
1775 +  __asm__ ( "fstd,ma %%fr0,8(%1)\n"
1776 +           "fstd,ma %%fr1,8(%1)\n"
1777 +           "fstd,ma %%fr2,8(%1)\n"
1778 +           "fstd %%fr3,0(%1)\n"
1779 +           : "=m" (*temp), "+r" (temp)
1780 +         );  
1781    return 0;
1782  }
1783 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/feholdexcpt.c      2003-10-22 01:03:47.000000000 -0400
1784 +++ glibc-2.3.2/sysdeps/hppa/fpu/feholdexcpt.c  2003-10-22 01:07:38.000000000 -0400
1785 @@ -25,36 +25,29 @@
1786  feholdexcept (fenv_t *envp)
1787  {
1788    fenv_t clear;
1789 +  fenv_t * _regs = envp;
1790  
1791    /* Store the environment.  */
1792 -  {
1793 -    fenv_t * _regs = envp;
1794 -    __asm__ (
1795 -            "fstd %%fr0,0(%2)\n"
1796 -            "fstd,ma %%fr1,8(%2)\n"
1797 -            "fstd,ma %%fr2,8(%2)\n"
1798 -            "fstd %%fr3,0(%2)\n"
1799 -            : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
1800 -    memcpy (&clear, envp, sizeof (clear));
1801 -  }
1802 +  __asm__ ( "fstd,ma %%fr0,8(%1)\n"
1803 +           "fstd,ma %%fr1,8(%1)\n"
1804 +           "fstd,ma %%fr2,8(%1)\n"
1805 +           "fstd %%fr3,0(%1)\n"
1806 +           : "=m" (*_regs), "+r" (_regs)
1807 +         );
1808 +  
1809 +  memcpy (&clear, envp, sizeof (clear));
1810  
1811 -  /* Now clear all exceptions.  */
1812 -  clear.__status_word &= ~(FE_ALL_EXCEPT << 27);
1813 +  /* Now clear all exceptions (Enable bits and flags)  */
1814 +  clear.__status_word &= ~((FE_ALL_EXCEPT << 27) | FE_ALL_EXCEPT);
1815    memset (clear.__exception, 0, sizeof (clear.__exception));
1816  
1817 -  /* And set all exceptions to non-stop.  */
1818 -  clear.__status_word &= ~FE_ALL_EXCEPT;
1819 -
1820    /* Load the new environment. */
1821 -  {
1822 -    fenv_t * _regs = &clear + 1;
1823 -    __asm__ (
1824 -            "fldd,mb -8(%2),%%fr3\n"
1825 -            "fldd,mb -8(%2),%%fr2\n"
1826 -            "fldd,mb -8(%2),%%fr1\n"
1827 -            "fldd -8(%2),%%fr0\n"
1828 -            : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
1829 -  }
1830 +  __asm__ ( "fldd,ma -8(%1),%%fr3\n"
1831 +           "fldd,ma -8(%1),%%fr2\n"
1832 +           "fldd,ma -8(%1),%%fr1\n"
1833 +           "fldd 0(%1),%%fr0\n"
1834 +           : "=m" (*_regs), "+r" (_regs)
1835 +         );
1836  
1837    return 0;
1838  }
1839 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fesetenv.c 2003-10-22 01:03:47.000000000 -0400
1840 +++ glibc-2.3.2/sysdeps/hppa/fpu/fesetenv.c     2003-10-22 01:07:38.000000000 -0400
1841 @@ -26,24 +26,23 @@
1842  fesetenv (const fenv_t *envp)
1843  {
1844    fenv_t temp;
1845 +  fenv_t * _regs = &temp;
1846  
1847    /* Install the environment specified by ENVP.  But there are a few
1848       values which we do not want to come from the saved environment.
1849       Therefore, we get the current environment and replace the values
1850       we want to use from the environment specified by the parameter.  */
1851 -  {
1852 -    fenv_t * _regs = &temp;
1853 -    __asm__ (
1854 -            "fstd %%fr0,0(%2)\n"
1855 -            "fstd,ma %%fr1,8(%2)\n"
1856 -            "fstd,ma %%fr2,8(%2)\n"
1857 -            "fstd %%fr3,0(%2)\n"
1858 -            : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
1859 -  }
1860  
1861 -  temp.__status_word &= ~(FE_ALL_EXCEPT
1862 -                         | (FE_ALL_EXCEPT << 27)
1863 -                         | FE_DOWNWARD);
1864 +  __asm__ ( "fstd,ma %%fr0,8(%1)\n"
1865 +           "fstd,ma %%fr1,8(%1)\n"
1866 +           "fstd,ma %%fr2,8(%1)\n"
1867 +           "fstd %%fr3,0(%1)\n"
1868 +           : "=m" (*_regs), "+r" (_regs) 
1869 +         );
1870 +
1871 +  temp.__status_word &= ~((FE_ALL_EXCEPT << 27)
1872 +                         | FE_DOWNWARD 
1873 +                         | FE_ALL_EXCEPT);  
1874    if (envp == FE_DFL_ENV)
1875      ;
1876    else if (envp == FE_NOMASK_ENV)
1877 @@ -55,16 +54,13 @@
1878                               | (FE_ALL_EXCEPT << 27)));
1879  
1880    /* Load the new environment. */
1881 -  {
1882 -    fenv_t * _regs = &temp + 1;
1883 -    __asm__ (
1884 -            "fldd,mb -8(%2),%%fr3\n"
1885 -            "fldd,mb -8(%2),%%fr2\n"
1886 -            "fldd,mb -8(%2),%%fr1\n"
1887 -            "fldd -8(%2),%%fr0\n"
1888 -            : "=m" (*_regs), "=r" (_regs) : "1" (_regs));
1889 -  }
1890 -
1891 +  __asm__ ( "fldd,ma -8(%1),%%fr3\n"
1892 +           "fldd,ma -8(%1),%%fr2\n"
1893 +           "fldd,ma -8(%1),%%fr1\n"
1894 +           "fldd 0(%2),%%fr0\n"
1895 +           : "=m" (*_regs), "+r" (_regs)
1896 +         );
1897 +  
1898    /* Success.  */
1899    return 0;
1900  }
1901 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/feupdateenv.c      2003-10-22 01:03:47.000000000 -0400
1902 +++ glibc-2.3.2/sysdeps/hppa/fpu/feupdateenv.c  2003-10-22 01:07:38.000000000 -0400
1903 @@ -27,14 +27,12 @@
1904  
1905    /* Get the current exception status. */
1906    __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
1907 -  sw[0] &= (FE_ALL_EXCEPT << 27);
1908 -
1909 +  sw[0] &= FE_ALL_EXCEPT;
1910 +  envp->__status_word = envp->__status_word | sw[0];
1911 +  
1912    /* Install new environment.  */
1913    fesetenv (envp);
1914  
1915 -  /* Raise the saved exception. */
1916 -  feraiseexcept (sw[0] >> 27);
1917 -
1918    /* Success.  */
1919    return 0;
1920  }
1921 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fraiseexcpt.c      2003-10-22 01:03:47.000000000 -0400
1922 +++ glibc-2.3.2/sysdeps/hppa/fpu/fraiseexcpt.c  2003-10-22 01:07:38.000000000 -0400
1923 @@ -22,6 +22,9 @@
1924  #include <float.h>
1925  #include <math.h>
1926  
1927 +/* Please see section 10, 
1928 +   page 10-5 "Delayed Trapping" in the PA-RISC 2.0 Architecture manual */
1929 +
1930  int
1931  feraiseexcept (int excepts)
1932  {
1933 @@ -33,56 +36,64 @@
1934  
1935    /* We do these bits in assembly to be certain GCC doesn't optimize
1936       away something important, and so we can force delayed traps to
1937 -     occur.  */
1938 -
1939 -  /* FIXME: These all need verification! */
1940 +     occur. */
1941  
1942 -  /* First: invalid exception.  */
1943 +  /* We use "fldd 0(%%sr0,%%sp),%0" to flush the delayed exception */
1944 +       
1945 +  /* First: Invalid exception.  */
1946    if (excepts & FE_INVALID)
1947      {
1948        /* One example of a invalid operation is 0 * Infinity.  */
1949        double d = HUGE_VAL;
1950 -      __asm__ __volatile__ ("fmpy,dbl %1,%%fr0,%0\n\t"
1951 -                           /* FIXME: is this a proper trap barrier? */
1952 -                           "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d));
1953 +      __asm__ __volatile__ (
1954 +               "       fcpy,dbl %%fr0,%%fr22\n"
1955 +               "       fmpy,dbl %0,%%fr22,%0\n"
1956 +               "       fldd 0(%%sr0,%%sp),%0"
1957 +               : "+f" (d) : : "%fr22" );
1958      }
1959  
1960 -  /* Next: division by zero.  */
1961 +  /* Second: Division by zero.  */
1962    if (excepts & FE_DIVBYZERO)
1963      {
1964        double d = 1.0;
1965 -      __asm__ __volatile__ ("fdiv,dbl %1,%%fr0,%0\n\t"
1966 -                           "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d));
1967 +      __asm__ __volatile__ (
1968 +               "       fcpy,dbl %%fr0,%%fr22\n"
1969 +               "       fdiv,dbl %0,%%fr22,%0\n"
1970 +               "       fldd 0(%%sr0,%%sp),%0"
1971 +               : "+f" (d) : : "%fr22" );
1972      }
1973  
1974 -  /* Next: overflow.  */
1975 -  /* FIXME: Compare with IA-64 - do we have the same problem? */
1976 +  /* Third: Overflow.  */
1977    if (excepts & FE_OVERFLOW)
1978      {
1979        double d = DBL_MAX;
1980 -
1981 -      __asm__ __volatile__ ("fmpy,dbl %1,%1,%0\n\t"
1982 -                           "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d));
1983 +      __asm__ __volatile__ (
1984 +               "       fadd,dbl %0,%0,%0\n"
1985 +               "       fldd 0(%%sr0,%%sp),%0"
1986 +               : "+f" (d) );
1987      }
1988  
1989 -  /* Next: underflow.  */
1990 +  /* Fourth: Underflow.  */
1991    if (excepts & FE_UNDERFLOW)
1992      {
1993        double d = DBL_MIN;
1994 -      double e = 69.69;
1995 -
1996 -      __asm__ __volatile__ ("fdiv,dbl %1,%2,%0\n\t"
1997 -                           "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d), "f" (e));
1998 +      double e = 3.0;
1999 +      __asm__ __volatile__ (
2000 +               "       fdiv,dbl %0,%1,%0\n"
2001 +               "       fldd 0(%%sr0,%%sp),%0"
2002 +               : "+f" (d) : "f" (e) );
2003      }
2004  
2005 -  /* Last: inexact.  */
2006 +  /* Fifth: Inexact */
2007    if (excepts & FE_INEXACT)
2008      {
2009 -      double d = 1.0;
2010 -      double e = M_PI;
2011 -
2012 -      __asm__ __volatile__ ("fdiv,dbl %1,%2,%0\n\t"
2013 -                           "fcpy,dbl %%fr0,%%fr0" : "=f" (d) : "0" (d), "f" (e));
2014 +      double d = M_PI;
2015 +      double e = 69.69;
2016 +      __asm__ __volatile__ (
2017 +               "       fdiv,dbl %0,%1,%%fr22\n"
2018 +               "       fcnvfxt,dbl,sgl %%fr22,%%fr22L\n"
2019 +               "       fldd 0(%%sr0,%%sp),%%fr22"
2020 +               : : "f" (d), "f" (e) : "%fr22" );
2021      }
2022  
2023    /* Success.  */
2024 --- glibc-2.3.2-orig-debian/sysdeps/hppa/fpu/fsetexcptflg.c     2003-10-22 01:03:47.000000000 -0400
2025 +++ glibc-2.3.2/sysdeps/hppa/fpu/fsetexcptflg.c 2003-10-22 01:07:38.000000000 -0400
2026 @@ -29,8 +29,7 @@
2027    /* Get the current status word. */
2028    __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
2029  
2030 -  /* Install the new exception flags bits.  */
2031 -  sw[0] &= ~(excepts & (FE_ALL_EXCEPT >> 27));
2032 +  /* Clear exception flags, and install new neable trap bits  */
2033    sw[0] |= (*flagp & excepts & FE_ALL_EXCEPT) << 27;
2034  
2035    /* Store the new status word.  */
2036 --- glibc-2.3.2-orig-debian/sysdeps/hppa/libgcc-compat.c        1969-12-31 19:00:00.000000000 -0500
2037 +++ glibc-2.3.2/sysdeps/hppa/libgcc-compat.c    2003-10-22 01:07:38.000000000 -0400
2038 @@ -0,0 +1,43 @@
2039 +/* pre-.hidden libgcc compatibility
2040 +   Copyright (C) 2002 Free Software Foundation, Inc.
2041 +   This file is part of the GNU C Library.
2042 +   Contributed by Randolph Chung
2043 +
2044 +   The GNU C Library is free software; you can redistribute it and/or
2045 +   modify it under the terms of the GNU Lesser General Public
2046 +   License as published by the Free Software Foundation; either
2047 +   version 2.1 of the License, or (at your option) any later version.
2048 +
2049 +   The GNU C Library is distributed in the hope that it will be useful,
2050 +   but WITHOUT ANY WARRANTY; without even the implied warranty of
2051 +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
2052 +   Lesser General Public License for more details.
2053 +
2054 +   You should have received a copy of the GNU Lesser General Public
2055 +   License along with the GNU C Library; if not, write to the Free
2056 +   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
2057 +   02111-1307 USA.  */
2058 +
2059 +
2060 +#include <stdint.h>
2061 +#include <shlib-compat.h>
2062 +
2063 +#if SHLIB_COMPAT(libc, GLIBC_2_0, GLIBC_2_2_6)
2064 +
2065 +symbol_version (__clz_tab_internal, __clz_tab, GLIBC_2.2);
2066 +
2067 +typedef unsigned int UQItype  __attribute__ ((mode (QI)));
2068 +
2069 +const UQItype __clz_tab_internal[] =
2070 +{
2071 +  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
2072 +  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
2073 +  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
2074 +  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
2075 +  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
2076 +  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
2077 +  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
2078 +  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
2079 +};
2080 +
2081 +#endif
2082 --- glibc-2.3.2-orig-debian/sysdeps/hppa/sysdep.h       2003-10-22 01:03:47.000000000 -0400
2083 +++ glibc-2.3.2/sysdeps/hppa/sysdep.h   2003-10-22 01:07:38.000000000 -0400
2084 @@ -70,6 +70,7 @@
2085  #define        PSEUDO_END(name)                                                      \
2086    END (name)
2087  
2088 +#undef JUMPTARGET
2089  #define JUMPTARGET(name)       name
2090  #define SYSCALL_PIC_SETUP      /* Nothing.  */
2091  
2092 --- glibc-2.3.2-orig-debian/sysdeps/posix/getaddrinfo.c 2003-10-22 01:06:12.000000000 -0400
2093 +++ glibc-2.3.2/sysdeps/posix/getaddrinfo.c     2003-10-22 01:07:38.000000000 -0400
2094 @@ -53,6 +53,7 @@
2095  #include <sys/utsname.h>
2096  #include <net/if.h>
2097  #include <nsswitch.h>
2098 +#include <stdbool.h>
2099  
2100  #define GAIH_OKIFUNSPEC 0x0100
2101  #define GAIH_EAI        ~(GAIH_OKIFUNSPEC)
2102 --- glibc-2.3.2-orig-debian/sysdeps/unix/Makefile       2003-10-22 01:06:12.000000000 -0400
2103 +++ glibc-2.3.2/sysdeps/unix/Makefile   2003-10-22 01:07:38.000000000 -0400
2104 @@ -295,6 +295,7 @@
2105                           $(..)sysdeps/unix/Makefile
2106         $(make-target-directory)
2107         (echo '#include <errno.h>'; \
2108 +        echo 'extern long int _no_syscall(void);'; \
2109          echo 'long int _no_syscall (void)'; \
2110          echo '{ __set_errno (ENOSYS); return -1L; }'; \
2111          for call in $(unix-stub-syscalls); do \
2112 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/clone.S        2003-10-22 01:03:48.000000000 -0400
2113 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/clone.S    2003-10-22 01:07:38.000000000 -0400
2114 @@ -28,6 +28,8 @@
2115  
2116  /* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg) */
2117  
2118 +#warning CAO: __clone needs verification
2119 +
2120          .text
2121  ENTRY(__clone)
2122         /* FIXME: I have no idea how profiling works on hppa. */
2123 @@ -42,6 +44,11 @@
2124         stwm    %arg0,64(%arg1)
2125         stw     %arg3,-60(%arg1)
2126  
2127 +       /* Save the PIC register. */
2128 +#ifdef PIC
2129 +       stw     %r19,-32(%sr0, %sp)     /* parent */
2130 +#endif
2131 +
2132         /* Do the system call */
2133         copy    %arg2,%arg0
2134         ble     0x100(%sr2,%r0)
2135 @@ -53,19 +60,31 @@
2136  
2137         comib,=,n 0,%ret0,thread_start
2138  
2139 -       /* Successful return from the parent */
2140 +       /* Successful return from the parent
2141 +          No need to restore the PIC register, 
2142 +          since we return immediately. */
2143 +
2144         bv      %r0(%rp)
2145         nop
2146  
2147         /* Something bad happened -- no child created */
2148  .Lerror:
2149 +
2150 +       /* Restore the PIC register on error */
2151 +#ifdef PIC
2152 +       ldw     -32(%sr0, %sp), %r19    /* parent */
2153 +#endif
2154 +
2155         b       __syscall_error
2156         sub     %r0,%ret0,%arg0
2157  
2158  thread_start:
2159 +
2160         /* Load up the arguments.  */
2161 -       ldw     -60(%sp),%arg0
2162 -       ldw     -64(%sp),%r22
2163 +       ldw     -60(%sr0, %sp),%arg0
2164 +       ldw     -64(%sr0, %sp),%r22
2165 +
2166 +       /* FIXME: Don't touch the child's PIC register? */
2167  
2168         /* Call the user's function */
2169         bl      $$dyncall,%r31
2170 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/kernel_stat.h  2003-10-22 01:03:48.000000000 -0400
2171 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/kernel_stat.h      2003-10-22 01:07:38.000000000 -0400
2172 @@ -1,30 +1,30 @@
2173 -/* definition of "struct stat" from the kernel */
2174 +/* Definition of 'struct stat' taken from kernel, please keep up to date */
2175  struct kernel_stat {
2176 -       unsigned long   st_dev;         /* dev_t is 32 bits on parisc */
2177 -       unsigned long   st_ino;         /* 32 bits */
2178 +       unsigned int    st_dev;         /* dev_t is 32 bits on parisc */
2179 +       unsigned int    st_ino;         /* 32 bits */
2180         unsigned short  st_mode;        /* 16 bits */
2181         unsigned short  st_nlink;       /* 16 bits */
2182         unsigned short  st_reserved1;   /* old st_uid */
2183         unsigned short  st_reserved2;   /* old st_gid */
2184 -       unsigned long   st_rdev;
2185 -       unsigned long   st_size;
2186 +       unsigned int    st_rdev;
2187 +       unsigned int    st_size;
2188         struct timespec st_atim;
2189 -       struct timespec st_mtim;
2190 -       struct timespec st_ctim;
2191 -       long            st_blksize;
2192 -       long            st_blocks;
2193 -       unsigned long   __unused1;      /* ACL stuff */
2194 -       unsigned long   __unused2;      /* network */
2195 -       unsigned long   __unused3;      /* network */
2196 -       unsigned long   __unused4;      /* cnodes */
2197 +       struct timespec st_mtim;
2198 +       struct timespec st_ctim;
2199 +       int             st_blksize;
2200 +       int             st_blocks;
2201 +       unsigned int    __unused1;      /* ACL stuff */
2202 +       unsigned int    __unused2;      /* network */
2203 +       unsigned int    __unused3;      /* network */
2204 +       unsigned int    __unused4;      /* cnodes */
2205         unsigned short  __unused5;      /* netsite */
2206         short           st_fstype;
2207 -       unsigned long   st_realdev;
2208 +       unsigned int    st_realdev;
2209         unsigned short  st_basemode;
2210         unsigned short  st_spareshort;
2211 -       unsigned long   st_uid;
2212 -       unsigned long   st_gid;
2213 -       unsigned long   st_spare4[3];
2214 +       unsigned int    st_uid;
2215 +       unsigned int    st_gid;
2216 +       unsigned int    st_spare4[3];
2217  };
2218  
2219  #define _HAVE_STAT_NSEC
2220 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/mmap.c 2003-10-22 01:03:48.000000000 -0400
2221 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/mmap.c     2003-10-22 01:07:38.000000000 -0400
2222 @@ -37,7 +37,7 @@
2223         
2224         __ptr_t ret;
2225  
2226 -       ret = INLINE_SYSCALL(mmap, 6, addr, len, prot, flags, fd, offset);
2227 +       ret = (__ptr_t)INLINE_SYSCALL(mmap, 6, addr, len, prot, flags, fd, offset);
2228  
2229         /* check if it's really a negative number */
2230         if(((unsigned long)ret & 0xfffff000) == 0xfffff000)
2231 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/sysdep.c       2003-10-22 01:03:48.000000000 -0400
2232 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/sysdep.c   2003-10-22 01:07:38.000000000 -0400
2233 @@ -19,6 +19,10 @@
2234  #include <sysdep.h>
2235  #include <errno.h>
2236  
2237 +extern int __syscall_error(int err_no);
2238 +extern int syscall (int sysnum, int arg0, int arg1, int arg2, 
2239 +                       int arg3, int arg4, int arg5);
2240 +
2241  /* This routine is jumped to by all the syscall handlers, to stash
2242     an error number into errno.  */
2243  int
2244 @@ -30,25 +34,31 @@
2245  
2246  
2247  /* HPPA implements syscall() in 'C'; the assembler version would
2248 -   typically be in syscall.S.  */
2249 -
2250 +   typically be in syscall.S. Also note that we have INLINE_SYSCALL,
2251 +   INTERNAL_SYSCALL, and all the generated pure assembly syscall wrappers.
2252 +   How often the function is used is unknown. */
2253  int
2254  syscall (int sysnum, int arg0, int arg1, int arg2, int arg3, int arg4, int arg5)
2255  {
2256 -  long __sys_res;
2257 -  {
2258 -    register unsigned long __res asm("r28");
2259 -    LOAD_ARGS_6(arg0, arg1, arg2, arg3, arg4, arg5)
2260 -      asm volatile ("ble  0x100(%%sr2, %%r0)\n\t"
2261 -                   "copy %1, %%r20"
2262 -                   : "=r" (__res)
2263 -                   : "r" (sysnum) ASM_ARGS_6);
2264 -    __sys_res = __res;
2265 -  }
2266 -  if ((unsigned long) __sys_res >= (unsigned long)-4095)
2267 -    {
2268 -    __set_errno(-__sys_res);
2269 -    __sys_res = -1;
2270 -  }
2271 -  return __sys_res;
2272 +       /* FIXME: Keep this matching INLINE_SYSCALL for hppa */
2273 +       long __sys_res;
2274 +       {
2275 +               register unsigned long __res asm("r28");
2276 +               LOAD_ARGS_6(arg0, arg1, arg2, arg3, arg4, arg5)
2277 +               asm volatile (
2278 +                       STW_ASM_PIC
2279 +                       "       ble  0x100(%%sr2, %%r0) \n"
2280 +                       "       copy %1, %%r20          \n"
2281 +                       LDW_ASM_PIC
2282 +                       : "=r" (__res)
2283 +                       : "r" (sysnum) ASM_ARGS_6
2284 +                       : CALL_CLOB_REGS CLOB_ARGS_6
2285 +               );
2286 +               __sys_res = __res;
2287 +       }
2288 +       if ((unsigned long) __sys_res >= (unsigned long)-4095){
2289 +               __set_errno(-__sys_res);
2290 +               __sys_res = -1;
2291 +       }
2292 +       return __sys_res;
2293  }
2294 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/sysdep.h       2003-10-22 01:06:12.000000000 -0400
2295 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/sysdep.h   2003-10-22 01:07:38.000000000 -0400
2296 @@ -31,6 +31,26 @@
2297  #undef SYS_ify
2298  #define SYS_ify(syscall_name)  (__NR_##syscall_name)
2299  
2300 +/* Included for older kernels whose headers 
2301 +   are missing the define  */ 
2302 +#ifndef __NR_semtimedop
2303 +# define __NR_semtimedop 228
2304 +#endif
2305 +
2306 +# ifdef PIC
2307 +/* WARNING: CANNOT BE USED IN A NOP! */
2308 +#  define STW_PIC stw %r19, -32(%sr0, %sp) ASM_LINE_SEP
2309 +#  define LDW_PIC ldw -32(%sr0, %sp), %r19 ASM_LINE_SEP
2310 +#  define STW_ASM_PIC  "       copy %%r19, %%r4\n"
2311 +#  define LDW_ASM_PIC  "       copy %%r4, %%r19\n"
2312 +#  define USING_GR4    "%r4",
2313 +# else
2314 +#  define STW_PIC ASM_LINE_SEP
2315 +#  define LDW_PIC ASM_LINE_SEP
2316 +#  define STW_ASM_PIC  " \n"
2317 +#  define LDW_ASM_PIC  " \n"
2318 +#  define USING_GR4
2319 +# endif
2320  
2321  #ifdef __ASSEMBLER__
2322  
2323 @@ -77,20 +97,13 @@
2324         .text                                   ASM_LINE_SEP    \
2325         .export C_SYMBOL_NAME(name)             ASM_LINE_SEP    \
2326         .type   C_SYMBOL_NAME(name),@function   ASM_LINE_SEP    \
2327 -       C_LABEL(name)                                           \
2328 -       CALL_MCOUNT
2329 -
2330 -#define ret \
2331 -       bv 0(2)                                 ASM_LINE_SEP    \
2332 -       nop
2333 -
2334 -#define ret_NOERRNO \
2335 -       bv 0(2)                                 ASM_LINE_SEP    \
2336 -       nop
2337 +       C_LABEL(name)                           ASM_LINE_SEP    \
2338 +       CALL_MCOUNT                             ASM_LINE_SEP
2339  
2340  #undef END
2341 -#define END(name)                                              \
2342 -1:     .size   C_SYMBOL_NAME(name),1b-C_SYMBOL_NAME(name)
2343 +#define END(name)                                                      \
2344 +1:                                                     ASM_LINE_SEP    \
2345 +.size  C_SYMBOL_NAME(name),1b-C_SYMBOL_NAME(name)      ASM_LINE_SEP    \
2346  
2347  /* If compiled for profiling, call `mcount' at the start of each function.  */
2348  /* No, don't bother.  gcc will put the call in for us.  */
2349 @@ -110,27 +123,83 @@
2350         nop
2351  */
2352  
2353 -#define        PSEUDO(name, syscall_name, args)                                      \
2354 -  ENTRY (name)                                                               \
2355 -  DO_CALL(syscall_name, args)                                  ASM_LINE_SEP  \
2356 -  nop
2357 +#define        PSEUDO(name, syscall_name, args)                        \
2358 +  ENTRY (name)                                                 \
2359 +  DO_CALL(syscall_name, args)                  ASM_LINE_SEP    \
2360 +  nop                                          ASM_LINE_SEP
2361 +
2362 +#define ret \
2363 +       /* Return value set by ERRNO code */    ASM_LINE_SEP    \
2364 +       bv 0(2)                                 ASM_LINE_SEP    \
2365 +       nop                                     ASM_LINE_SEP
2366  
2367  #undef PSEUDO_END
2368 -#define        PSEUDO_END(name)                                                      \
2369 +#define        PSEUDO_END(name)                                        \
2370    END (name)
2371  
2372 -#define        PSEUDO_NOERRNO(name, syscall_name, args)                              \
2373 -  ENTRY (name)                                                               \
2374 -  DO_CALL(syscall_name, args)                                  ASM_LINE_SEP  \
2375 -  nop
2376 +/* We don't set the errno on the return from the syscall */
2377 +#define        PSEUDO_NOERRNO(name, syscall_name, args)                \
2378 +  ENTRY (name)                                                 \
2379 +  DO_CALL_NOERRNO(syscall_name, args)          ASM_LINE_SEP    \
2380 +  nop                                          ASM_LINE_SEP
2381  
2382 +#define ret_NOERRNO ret 
2383 +  
2384  #undef PSEUDO_END_NOERRNO
2385 -#define        PSEUDO_END_NOERRNO(name)                                              \
2386 +#define        PSEUDO_END_NOERRNO(name)                                \
2387    END (name)
2388  
2389 +/* This has to return the error value */
2390 +#undef  PSEUDO_ERRVAL
2391 +#define PSEUDO_ERRVAL(name, syscall_name, args)                        \
2392 +       ENTRY(name)                                             \
2393 +       DO_CALL_ERRVAL(syscall_name, args)      ASM_LINE_SEP    \
2394 +       nop                                     ASM_LINE_SEP
2395 +
2396 +#define ret_ERRVAL ret
2397 +       
2398 +#undef PSEUDO_END_ERRVAL
2399 +#define PSEUDO_END_ERRVAL(name)                                        \
2400 +       END(name)
2401 +
2402 +#undef JUMPTARGET
2403  #define JUMPTARGET(name)       name
2404  #define SYSCALL_PIC_SETUP      /* Nothing.  */
2405  
2406 +       
2407 +/* All the syscall assembly macros rely on finding the appropriate 
2408 +   SYSCALL_ERROR_LABEL or rather HANDLER. */
2409 +
2410 +/* int * __errno_location(void) so you have to store your value
2411 +   into the return address! */
2412 +#define DEFAULT_SYSCALL_ERROR_HANDLER                  \
2413 +       .import __errno_location,code   ASM_LINE_SEP    \
2414 +       /* branch to errno handler */   ASM_LINE_SEP    \
2415 +       bl __errno_location,%rp         ASM_LINE_SEP
2416 +       
2417 +/* Here are the myriad of configuration options that the above can
2418 +   work for... what we've done is provide the framework for future
2419 +   changes if required to each section */
2420 +       
2421 +#ifdef PIC
2422 +# if RTLD_PRIVATE_ERRNO
2423 +#  define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER                      
2424 +# else /* !RTLD_PRIVATE_ERRNO */
2425 +#  if defined _LIBC_REENTRANT
2426 +#   define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
2427 +#  else /* !_LIBC_REENTRANT */
2428 +#   define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
2429 +#  endif /* _LIBC_REENTRANT */
2430 +# endif /* RTLD_PRIVATE_ERRNO */
2431 +#else
2432 +# ifndef _LIBC_REENTRANT
2433 +#  define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
2434 +# else
2435 +#  define SYSCALL_ERROR_HANDLER DEFAULT_SYSCALL_ERROR_HANDLER
2436 +# endif
2437 +#endif                                 
2438 +       
2439 +       
2440  /* Linux takes system call arguments in registers:
2441         syscall number  gr20
2442         arg 1           gr26
2443 @@ -159,25 +228,61 @@
2444  
2445  #undef DO_CALL
2446  #define DO_CALL(syscall_name, args)                            \
2447 -       DOARGS_##args                                           \
2448 +       DOARGS_##args                           ASM_LINE_SEP    \
2449 +       STW_PIC                                 ASM_LINE_SEP    \
2450 +       /* Do syscall, delay loads # */         ASM_LINE_SEP    \
2451         ble  0x100(%sr2,%r0)                    ASM_LINE_SEP    \
2452         ldi SYS_ify (syscall_name), %r20        ASM_LINE_SEP    \
2453         ldi -0x1000,%r1                         ASM_LINE_SEP    \
2454         cmpb,>>=,n %r1,%ret0,0f                 ASM_LINE_SEP    \
2455 -       stw %rp, -20(%sr0,%r30)                 ASM_LINE_SEP    \
2456 -       stw %ret0, -24(%sr0,%r30)               ASM_LINE_SEP    \
2457 -       .import __errno_location,code           ASM_LINE_SEP    \
2458 -       bl __errno_location,%rp                 ASM_LINE_SEP    \
2459 -       ldo 64(%r30), %r30                      ASM_LINE_SEP    \
2460 -       ldo -64(%r30), %r30                     ASM_LINE_SEP    \
2461 -       ldw -24(%r30), %r26                     ASM_LINE_SEP    \
2462 +       /* save rp or we get lost */            ASM_LINE_SEP    \
2463 +       stw %rp, -20(%sr0,%sp)                  ASM_LINE_SEP    \
2464 +       /* Restore r19 from frame */            ASM_LINE_SEP    \
2465 +       LDW_PIC                                 ASM_LINE_SEP    \
2466 +       stw %ret0, -24(%sr0,%sp)                ASM_LINE_SEP    \
2467 +       SYSCALL_ERROR_HANDLER                   ASM_LINE_SEP    \
2468 +       /* create frame */                      ASM_LINE_SEP    \
2469 +       ldo 64(%sp), %sp                        ASM_LINE_SEP    \
2470 +       ldo -64(%sp), %sp                       ASM_LINE_SEP    \
2471 +       /* OPTIMIZE: Don't reload r19 */        ASM_LINE_SEP    \
2472 +       /* do a -1*syscall_ret0 */              ASM_LINE_SEP    \
2473 +       ldw -24(%sr0,%sp), %r26                 ASM_LINE_SEP    \
2474         sub %r0, %r26, %r26                     ASM_LINE_SEP    \
2475 +       /* Store into errno location */         ASM_LINE_SEP    \
2476         stw %r26, 0(%sr0,%ret0)                 ASM_LINE_SEP    \
2477 +       /* return -1 as error */                ASM_LINE_SEP    \
2478         ldo -1(%r0), %ret0                      ASM_LINE_SEP    \
2479 -       ldw -20(%r30), %rp                      ASM_LINE_SEP    \
2480 +       ldw -20(%sr0,%sp), %rp                  ASM_LINE_SEP    \
2481  0:                                             ASM_LINE_SEP    \
2482 +       UNDOARGS_##args                         ASM_LINE_SEP
2483 +
2484 +/* We do nothing with the return, except hand it back to someone else */
2485 +#undef  DO_CALL_NOERRNO
2486 +#define DO_CALL_NOERRNO(syscall_name, args)                    \
2487 +       DOARGS_##args                                           \
2488 +       /* No need to store r19 */              ASM_LINE_SEP    \
2489 +       ble  0x100(%sr2,%r0)                    ASM_LINE_SEP    \
2490 +       ldi SYS_ify (syscall_name), %r20        ASM_LINE_SEP    \
2491 +       /* Caller will restore r19 */           ASM_LINE_SEP    \
2492         UNDOARGS_##args
2493  
2494 +/* Here, we return the ERRVAL in assembly, note we don't call the
2495 +   error handler function, but we do 'negate' the return _IF_
2496 +   it's an error. Not sure if this is the right semantic. */
2497 +
2498 +#undef DO_CALL_ERRVAL
2499 +#define DO_CALL_ERRVAL(syscall_name, args)                     \
2500 +       DOARGS_##args                           ASM_LINE_SEP    \
2501 +       /* No need to store r19 */              ASM_LINE_SEP    \
2502 +       ble  0x100(%sr2,%r0)                    ASM_LINE_SEP    \
2503 +       ldi SYS_ify (syscall_name), %r20        ASM_LINE_SEP    \
2504 +       /* Caller will restore r19 */           ASM_LINE_SEP    \
2505 +       ldi -0x1000,%r1                         ASM_LINE_SEP    \
2506 +       cmpb,>>=,n %r1,%ret0,0f                 ASM_LINE_SEP    \
2507 +       sub %r0, %ret0, %ret0                   ASM_LINE_SEP    \
2508 +0:                                             ASM_LINE_SEP    \
2509 +       UNDOARGS_##args                         ASM_LINE_SEP
2510 +
2511  #define DOARGS_0 /* nothing */
2512  #define DOARGS_1 /* nothing */
2513  #define DOARGS_2 /* nothing */
2514 @@ -198,26 +303,87 @@
2515  
2516  #else
2517  
2518 +/* GCC has to be warned that a syscall may clobber all the ABI 
2519 +   registers listed as "caller-saves", see page 8, Table 2 
2520 +   in section 2.2.6 of the PA-RISC RUN-TIME architecture 
2521 +   document. However! r28 is the result and will conflict with
2522 +   the clobber list so it is left out. Also the input arguments
2523 +   registers r20 -> r26 will conflict with the list so they 
2524 +   are treated specially. Although r19 is clobbered by the syscall
2525 +   we cannot say this because it would violate ABI, thus we say
2526 +   r4 is clobbered and use that register to save/restore r19
2527 +   across the syscall. */
2528 +                
2529 +#define CALL_CLOB_REGS "%r1", "%r2", USING_GR4 \
2530 +                       "%r20", "%r29", "%r31"
2531 +                
2532  #undef INLINE_SYSCALL
2533 -#define INLINE_SYSCALL(name, nr, args...)      ({              \
2534 +#define INLINE_SYSCALL(name, nr, args...)      ({                      \
2535 +       long __sys_res;                                                 \
2536 +       {                                                               \
2537 +               register unsigned long __res asm("r28");                \
2538 +               LOAD_ARGS_##nr(args)                                    \
2539 +               /* FIXME: HACK stw/ldw r19 around syscall */            \
2540 +               asm volatile(                                           \
2541 +                       STW_ASM_PIC                                     \
2542 +                       "       ble  0x100(%%sr2, %%r0)\n"              \
2543 +                       "       ldi %1, %%r20\n"                        \
2544 +                       LDW_ASM_PIC                                     \
2545 +                       : "=r" (__res)                                  \
2546 +                       : "i" (SYS_ify(name)) ASM_ARGS_##nr             \
2547 +                       : CALL_CLOB_REGS CLOB_ARGS_##nr                 \
2548 +               );                                                      \
2549 +               __sys_res = (long)__res;                                \
2550 +       }                                                               \
2551 +       if ( (unsigned long)__sys_res >= (unsigned long)-4095 ){        \
2552 +               __set_errno(-__sys_res);                                \
2553 +               __sys_res = -1;                                         \
2554 +       }                                                               \
2555 +       __sys_res;                                                      \
2556 +})
2557 +
2558 +/* INTERNAL_SYSCALL_DECL - Allows us to setup some function static 
2559 +   value to use within the context of the syscall 
2560 +   INTERNAL_SYSCALL_ERROR_P - Returns 0 if it wasn't an error, 1 otherwise
2561 +   You are allowed to use the syscall result (val) and the DECL error variable
2562 +   to determine what went wrong.
2563 +   INTERNAL_SYSCALL_ERRNO - Munges the val/err pair into the error number.
2564 +   In our case we just flip the sign. */
2565 +
2566 +#undef INTERNAL_SYSCALL_DECL
2567 +#define INTERNAL_SYSCALL_DECL(err) do { } while (0)
2568 +
2569 +/* Equivalent to  (val < 0)&&(val > -4095) which is what we want */
2570 +#undef INTERNAL_SYSCALL_ERROR_P
2571 +#define INTERNAL_SYSCALL_ERROR_P(val, err) \
2572 +       ((unsigned long)val >= (unsigned long)-4095)
2573 +       
2574 +#undef INTERNAL_SYSCALL_ERRNO
2575 +#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
2576 +
2577 +/* Similar to INLINE_SYSCALL but we don't set errno */
2578 +#undef INTERNAL_SYSCALL
2579 +#define INTERNAL_SYSCALL(name, err, nr, args...)               \
2580 +({                                                             \
2581         long __sys_res;                                         \
2582         {                                                       \
2583                 register unsigned long __res asm("r28");        \
2584                 LOAD_ARGS_##nr(args)                            \
2585 +               /* FIXME: HACK stw/ldw r19 around syscall */    \
2586                 asm volatile(                                   \
2587 -                       "ble  0x100(%%sr2, %%r0)\n\t"           \
2588 -                       " ldi %1, %%r20"                        \
2589 +                       STW_ASM_PIC                             \
2590 +                       "       ble  0x100(%%sr2, %%r0)\n"      \
2591 +                       "       ldi %1, %%r20\n"                \
2592 +                       LDW_ASM_PIC                             \
2593                         : "=r" (__res)                          \
2594                         : "i" (SYS_ify(name)) ASM_ARGS_##nr     \
2595 -                        );                                     \
2596 -               __sys_res = __res;                              \
2597 -       }                                                       \
2598 -       if ((unsigned long)__sys_res >= (unsigned long)-4095) { \
2599 -               __set_errno(-__sys_res);                        \
2600 -               __sys_res = -1;                                 \
2601 +                       : CALL_CLOB_REGS CLOB_ARGS_##nr         \
2602 +               );                                              \
2603 +               __sys_res = (long)__res;                        \
2604         }                                                       \
2605         __sys_res;                                              \
2606 -})
2607 + })
2608 +
2609  
2610  #define LOAD_ARGS_0()
2611  #define LOAD_ARGS_1(r26)                                       \
2612 @@ -239,12 +405,22 @@
2613         register unsigned long __r21 __asm__("r21") = (unsigned long)r21;       \
2614         LOAD_ARGS_5(r26,r25,r24,r23,r22)
2615  
2616 -#define ASM_ARGS_0
2617 -#define ASM_ARGS_1 , "r" (__r26)
2618 -#define ASM_ARGS_2 , "r" (__r26), "r" (__r25)
2619 -#define ASM_ARGS_3 , "r" (__r26), "r" (__r25), "r" (__r24)
2620 -#define ASM_ARGS_4 , "r" (__r26), "r" (__r25), "r" (__r24), "r" (__r23)
2621 -#define ASM_ARGS_5 , "r" (__r26), "r" (__r25), "r" (__r24), "r" (__r23), "r" (__r22)
2622 -#define ASM_ARGS_6 , "r" (__r26), "r" (__r25), "r" (__r24), "r" (__r23), "r" (__r22), "r" (__r21)
2623 -
2624 +/* Even with zero args we use r20 for the syscall number */
2625 +#define ASM_ARGS_0 
2626 +#define ASM_ARGS_1 ASM_ARGS_0, "r" (__r26)
2627 +#define ASM_ARGS_2 ASM_ARGS_1, "r" (__r25)
2628 +#define ASM_ARGS_3 ASM_ARGS_2, "r" (__r24)
2629 +#define ASM_ARGS_4 ASM_ARGS_3, "r" (__r23)
2630 +#define ASM_ARGS_5 ASM_ARGS_4, "r" (__r22)
2631 +#define ASM_ARGS_6 ASM_ARGS_5, "r" (__r21)
2632 +
2633 +/* The registers not listed as inputs but clobbered */
2634 +#define CLOB_ARGS_6
2635 +#define CLOB_ARGS_5 CLOB_ARGS_6, "%r21"
2636 +#define CLOB_ARGS_4 CLOB_ARGS_5, "%r22"
2637 +#define CLOB_ARGS_3 CLOB_ARGS_4, "%r23"
2638 +#define CLOB_ARGS_2 CLOB_ARGS_3, "%r24"
2639 +#define CLOB_ARGS_1 CLOB_ARGS_2, "%r25"
2640 +#define CLOB_ARGS_0 CLOB_ARGS_1, "%r26"
2641 +       
2642  #endif /* __ASSEMBLER__ */
2643 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/hppa/umount.c       2003-10-22 01:03:48.000000000 -0400
2644 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/hppa/umount.c   2003-10-22 01:07:38.000000000 -0400
2645 @@ -21,6 +21,7 @@
2646     does down here.  */
2647  
2648  extern long int __umount2 (const char *name, int flags);
2649 +extern long int __umount (const char * name);
2650  
2651  long int
2652  __umount (const char *name)
2653 --- glibc-2.3.2-orig-debian/sysdeps/unix/sysv/linux/sys/sysctl.h        2003-10-22 01:06:13.000000000 -0400
2654 +++ glibc-2.3.2/sysdeps/unix/sysv/linux/sys/sysctl.h    2003-10-22 01:07:38.000000000 -0400
2655 @@ -24,7 +24,9 @@
2656  #include <stddef.h>
2657  /* Prevent more kernel headers than necessary to be included.  */
2658  #define _LINUX_KERNEL_H        1
2659 -#define _LINUX_TYPES_H 1
2660 +#ifndef _LINUX_TYPES_H
2661 +# define _LINUX_TYPES_H 1
2662 +#endif
2663  #define _LINUX_LIST_H  1
2664  /* We do need this one for the declarations in <linux/sysctl.h>,
2665     since we've elided the inclusion of <linux/kernel.h> that gets them.  */