*/
# if defined(_I386_RWSEM_H) || defined(_ASM_X86_RWSEM_H)
# define RW_COUNT(rwp) ((SEM(rwp)->count < 0) ? (-1) : \
- (SEM(rwp)->count & RWSEM_ACTIVE_MASK))
+ (SEM(rwp)->count & RWSEM_ACTIVE_MASK))
# else
# define RW_COUNT(rwp) (SEM(rwp)->count & RWSEM_ACTIVE_MASK)
# endif
downgrade_write(SEM(rwp)); \
})
+#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define rw_tryupgrade(rwp) \
({ \
unsigned long _flags_; \
spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_); \
_rc_; \
})
+#else
+/*
+ * This can be done correctly, but each supported arch would need a
+ * custom cmpxchg() to atomically check and promote the rwsem (sketched
+ * below). That is not worth the trouble for now, so rw_tryupgrade()
+ * will always fail.
+ */
+#define rw_tryupgrade(rwp) ({ 0; })
+#endif
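
The hunk above shows only the head and tail of the generic-spinlock
rw_tryupgrade(); its middle is elided. Purely as a guide to the intended
logic, and not as the patch's actual body, a minimal sketch follows. It
assumes the lib/rwsem-spinlock.c layout (activity == 1 means a single
active reader, -1 means a writer, wait_list holds queued waiters) and the
_rc_ result variable implied by the trailing `_rc_;` shown above; updating
the recorded write owner is omitted here.

#define rw_tryupgrade(rwp)						\
({									\
	unsigned long _flags_;						\
	int _rc_ = 0;							\
	spin_lock_irqsave(&SEM(rwp)->wait_lock, _flags_);		\
	/* Promote only when we are the sole reader with no waiters */	\
	if (list_empty(&SEM(rwp)->wait_list) &&				\
	    (SEM(rwp)->activity == 1)) {				\
		SEM(rwp)->activity = -1;	/* now write locked */	\
		_rc_ = 1;						\
	}								\
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_);		\
	_rc_;								\
})
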
int spl_rw_init(void);
void spl_rw_fini(void);
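
The comment in the non-generic branch refers to a per-arch cmpxchg() that
could atomically check and promote the rwsem. As an illustration only, and
not part of this patch, here is a minimal sketch assuming the classic x86
rwsem count encoding, where a single active reader leaves count ==
RWSEM_ACTIVE_READ_BIAS and a writer with no waiters holds
RWSEM_ACTIVE_WRITE_BIAS; the helper name is hypothetical.

/*
 * Hypothetical sketch: promote a read lock to a write lock only when
 * the caller is the sole reader and no waiters are queued.  The bias
 * constants come from the x86 rwsem header of this era and are an
 * assumption here; other arches encode the count differently, which
 * is exactly why the patch does not attempt this.
 */
static inline int
spl_rw_tryupgrade_cmpxchg(krwlock_t *rwp)
{
	long oldval = RWSEM_ACTIVE_READ_BIAS;	/* one reader: us */
	long newval = RWSEM_ACTIVE_WRITE_BIAS;	/* writer, no waiters */

	/* Fails if another reader or a waiter slipped in concurrently */
	if (cmpxchg(&SEM(rwp)->count, oldval, newval) == oldval)
		return (1);

	return (0);
}

With such a helper the non-generic rw_tryupgrade(rwp) could expand to
spl_rw_tryupgrade_cmpxchg(rwp), at the cost of auditing the count encoding
for every supported arch, which is the trade-off the comment declines.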