/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	"
#define __EXT		"dext	"
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_llsc_mb()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	" __INS "%0, %4, %2, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m), "r" (~0));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}

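/*
 * Usage sketch (illustrative, not part of the original header; the flag
 * word and bit numbers below are hypothetical):
 *
 *	static unsigned long pending_flags;
 *
 *	set_bit(5, &pending_flags);
 *
 * Because @nr may exceed BITS_PER_LONG, set_bit(71, bitmap) on a 64-bit
 * kernel atomically sets bit 7 of bitmap[1].
 */
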
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(bit)) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	" __INS "%0, $0, %2, 1				\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (bit), "m" (*m));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << bit)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}

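/*
 * Usage sketch (illustrative; the bit name and state word are
 * hypothetical).  When clear_bit() is used for locking, the caller
 * supplies the barrier per the comment above:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_BUSY_BIT, &device_state);
 */
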
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}

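/*
 * Release-path sketch (illustrative; LOCK_BIT and lock_word are
 * hypothetical).  Stores from the critical section become visible
 * before the bit clears:
 *
 *	clear_bit_unlock(LOCK_BIT, &lock_word);
 */
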
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqz	%0, 2f				\n"
		"	.subsection 2				\n"
		"2:	b	1b				\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}

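/*
 * Toggle sketch (illustrative; names are hypothetical):
 *
 *	change_bit(POLARITY_BIT, &port_config);
 *
 * The flip is a single atomic read-modify-write, so two CPUs toggling
 * concurrently never lose an update.
 */
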
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

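/*
 * Claim-ownership sketch (illustrative; IN_USE_BIT and dev_flags are
 * hypothetical).  Exactly one racing caller observes the bit as
 * previously clear:
 *
 *	if (test_and_set_bit(IN_USE_BIT, &dev_flags))
 *		return -EBUSY;
 */
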
/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

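/*
 * Bit-lock sketch (illustrative; lock_word is hypothetical).  Acquire
 * here pairs with the release in clear_bit_unlock() above:
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, &lock_word);
 */
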
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (__builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	" __EXT "%2, %0, %3, 1				\n"
		"	" __INS	"%0, $0, %3, 1				\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "ri" (bit), "m" (*m)
		: "memory");
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

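/*
 * Consume-event sketch (illustrative; WORK_PENDING and flags are
 * hypothetical).  Even with racing consumers the work runs at most once
 * per set_bit():
 *
 *	if (test_and_clear_bit(WORK_PENDING, &flags))
 *		process_pending_work();
 */
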
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_llsc_mb();

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"\t%2, %1				\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << bit), "m" (*m)
		: "memory");
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}

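/*
 * Toggle-and-observe sketch (illustrative; names are hypothetical):
 *
 *	old = test_and_change_bit(POLARITY_BIT, &port_config);
 *
 * @old is nonzero iff the bit was set before the flip.
 */
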
#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

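/*
 * Contrast with clear_bit_unlock() (illustrative; lock_word is
 * hypothetical): the plain store is safe only when no other CPU can
 * write other bits of the word, e.g. a dedicated lock word:
 *
 *	__clear_bit_unlock(0, &lock_word);
 */
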
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__(
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}

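/*
 * Worked example: __ilog2(0x90) == 7, since 0x90 is binary 1001 0000
 * and its most significant 1 bit is bit 7; __ilog2(0) == -1 because
 * clz of zero yields the word size.
 */
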
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}

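/*
 * Worked example: for word == 0x18 (binary 1 1000), word & -word
 * isolates the lowest set bit, 0x08, so __ffs(0x18) == __ilog2(0x08) == 3.
 */
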
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

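/*
 * Worked example: ffs(0x44) isolates the lowest set bit (0x04) and
 * returns fls(0x04) == 3; ffs(0) == 0 via the explicit check above.
 */
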
#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */