/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
 * atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */
7 #include <linux/config.h>
/* On SMP we need to use memory barriers to ensure
 * correct memory operation ordering, nop these out
 * for uniprocessor builds.
 *
 * PRE  barrier: order all prior loads/stores before the atomic op.
 * POST barrier: order the atomic op's store before subsequent accesses.
 */
#ifdef CONFIG_SMP
#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad
#define ATOMIC_POST_BARRIER	membar #StoreLoad | #StoreStore
#else
#define ATOMIC_PRE_BARRIER	nop
#define ATOMIC_POST_BARRIER	nop
#endif
/* Two versions of the atomic routines, one that
 * does not return a value and does not perform
 * memory barriers, and a second which returns
 * a value and does the barriers.
 */
/* atomic_add: presumably void atomic_add(int i, atomic_t *v).
 * In: %o0 = increment, %o1 = pointer to the atomic counter.
 * Per the file header, this non-_ret variant returns nothing and
 * performs no memory barriers.
 * NOTE(review): the function body (the compare-and-swap retry loop) is
 * not visible in this extract -- only the .type/.size bracketing
 * survives; confirm against the full file before assembling.  A .globl
 * directive for this symbol is also not visible here.
 */
30 .type atomic_add,#function
31 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
40 .size atomic_add, .-atomic_add
/* atomic_sub: presumably void atomic_sub(int i, atomic_t *v).
 * In: %o0 = decrement, %o1 = pointer to the atomic counter.
 * Per the file header, this non-_ret variant returns nothing and
 * performs no memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .type/.size bracketing survives.  Confirm against the full file.
 */
43 .type atomic_sub,#function
44 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
53 .size atomic_sub, .-atomic_sub
/* atomic_add_ret: presumably int atomic_add_ret(int i, atomic_t *v).
 * In: %o0 = increment, %o1 = pointer to the atomic counter.
 * Per the file header, the _ret variants return the new value and
 * perform the ATOMIC_PRE/POST_BARRIER memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .type/.size bracketing survives.  Confirm against the full file.
 */
56 .type atomic_add_ret,#function
57 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
68 .size atomic_add_ret, .-atomic_add_ret
/* atomic_sub_ret: presumably int atomic_sub_ret(int i, atomic_t *v).
 * In: %o0 = decrement, %o1 = pointer to the atomic counter.
 * Per the file header, the _ret variants return the new value and
 * perform the ATOMIC_PRE/POST_BARRIER memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .type/.size bracketing survives.  Confirm against the full file.
 */
71 .type atomic_sub_ret,#function
72 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
83 .size atomic_sub_ret, .-atomic_sub_ret
/* atomic64_add: presumably void atomic64_add(long i, atomic64_t *v).
 * In: %o0 = increment, %o1 = pointer to the 64-bit atomic counter.
 * Per the file header, this non-_ret variant returns nothing and
 * performs no memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .type/.size bracketing survives.  Confirm against the full file.
 */
86 .type atomic64_add,#function
87 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
96 .size atomic64_add, .-atomic64_add
/* atomic64_sub: presumably void atomic64_sub(long i, atomic64_t *v).
 * In: %o0 = decrement, %o1 = pointer to the 64-bit atomic counter.
 * Per the file header, this non-_ret variant returns nothing and
 * performs no memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .type/.size bracketing survives.  Confirm against the full file.
 */
99 .type atomic64_sub,#function
100 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
109 .size atomic64_sub, .-atomic64_sub
/* atomic64_add_ret: presumably long atomic64_add_ret(long i, atomic64_t *v).
 * Exported symbol (.globl).
 * In: %o0 = increment, %o1 = pointer to the 64-bit atomic counter.
 * Per the file header, the _ret variants return the new value and
 * perform the ATOMIC_PRE/POST_BARRIER memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .globl/.type/.size bracketing survives.  Confirm against the
 * full file before assembling.
 */
111 .globl atomic64_add_ret
112 .type atomic64_add_ret,#function
113 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
124 .size atomic64_add_ret, .-atomic64_add_ret
/* atomic64_sub_ret: presumably long atomic64_sub_ret(long i, atomic64_t *v).
 * Exported symbol (.globl).
 * In: %o0 = decrement, %o1 = pointer to the 64-bit atomic counter.
 * Per the file header, the _ret variants return the new value and
 * perform the ATOMIC_PRE/POST_BARRIER memory barriers.
 * NOTE(review): the function body is missing from this extract; only
 * the .globl/.type/.size bracketing survives.  Confirm against the
 * full file before assembling.
 */
126 .globl atomic64_sub_ret
127 .type atomic64_sub_ret,#function
128 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
139 .size atomic64_sub_ret, .-atomic64_sub_ret