IA64 uses the generic version of __raw_read_trylock, which always
waits for the lock to be free instead of returning failure when the
lock is already held.  Define an ia64-specific __raw_read_trylock that
behaves correctly, and drop the generic one.
Signed-off-by: Keith Owens <kaos@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
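
For context, the point of a trylock is to return failure immediately when
the lock cannot be taken, rather than spin.  A minimal, hypothetical caller
(not part of this patch; peek_stats is an illustrative name) relying on
those semantics might look like this:

#include <linux/errno.h>
#include <linux/spinlock.h>

static int peek_stats(rwlock_t *lock)
{
        if (!read_trylock(lock))
                return -EBUSY; /* writer active: give up instead of waiting */
        /* ... read the data protected by the lock ... */
        read_unlock(lock);
        return 0;
}

With the generic fallback, read_trylock() on ia64 would instead spin until
the writer released the lock, defeating the purpose of the trylock variant.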
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
+static inline int __raw_read_trylock(raw_rwlock_t *x)
+{
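+       /* access the rwlock as a single 32-bit word so it can be cmpxchg'd */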
+       union {
+               raw_rwlock_t lock;
+               __u32 word;
+       } old, new;
+       old.lock = new.lock = *x;
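+       /* fail if a writer holds the lock (write_lock must be 0 to succeed) */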
+       old.lock.write_lock = new.lock.write_lock = 0;
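+       /* the new value counts us as one more reader */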
+       ++new.lock.read_counter;
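+       /* succeeds only if the lock word still matches the snapshot */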
+       return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
+}
 
 #endif /*  _ASM_IA64_SPINLOCK_H */