#include <linux/rwsem.h>
#include <linux/sched.h>
-/* Linux kernel compatibility */
-#if defined(CONFIG_PREEMPT_RT_FULL)
-#define SPL_RWSEM_SINGLE_READER_VALUE (1)
-#define SPL_RWSEM_SINGLE_WRITER_VALUE (0)
-#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define SPL_RWSEM_SINGLE_READER_VALUE (1)
-#define SPL_RWSEM_SINGLE_WRITER_VALUE (-1)
-#elif defined(RWSEM_ACTIVE_MASK)
-#define SPL_RWSEM_SINGLE_READER_VALUE (RWSEM_ACTIVE_READ_BIAS)
-#define SPL_RWSEM_SINGLE_WRITER_VALUE (RWSEM_ACTIVE_WRITE_BIAS)
-#endif
-
-/* Linux 3.16 changed activity to count for rwsem-spinlock */
-#if defined(CONFIG_PREEMPT_RT_FULL)
-#define RWSEM_COUNT(sem) sem->read_depth
-#elif defined(HAVE_RWSEM_ACTIVITY)
-#define RWSEM_COUNT(sem) sem->activity
-/* Linux 4.8 changed count to an atomic_long_t for !rwsem-spinlock */
-#elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
-#define RWSEM_COUNT(sem) atomic_long_read(&(sem)->count)
-#else
-#define RWSEM_COUNT(sem) sem->count
-#endif
-
-#if defined(RWSEM_SPINLOCK_IS_RAW)
-#define spl_rwsem_lock_irqsave(lk, fl) raw_spin_lock_irqsave(lk, fl)
-#define spl_rwsem_unlock_irqrestore(lk, fl) \
- raw_spin_unlock_irqrestore(lk, fl)
-#define spl_rwsem_trylock_irqsave(lk, fl) raw_spin_trylock_irqsave(lk, fl)
-#else
-#define spl_rwsem_lock_irqsave(lk, fl) spin_lock_irqsave(lk, fl)
-#define spl_rwsem_unlock_irqrestore(lk, fl) spin_unlock_irqrestore(lk, fl)
-#define spl_rwsem_trylock_irqsave(lk, fl) spin_trylock_irqsave(lk, fl)
-#endif /* RWSEM_SPINLOCK_IS_RAW */
-
-#define spl_rwsem_is_locked(rwsem) rwsem_is_locked(rwsem)
-
typedef enum {
RW_DRIVER = 2,
RW_DEFAULT = 4,
static inline int
RW_LOCK_HELD(krwlock_t *rwp)
{
- return (spl_rwsem_is_locked(SEM(rwp)));
+ return (rwsem_is_locked(SEM(rwp)));
}
static inline int
*/
#define rw_destroy(rwp) ((void) 0)
+/*
+ * Upgrading a rwsem from a reader to a writer is not supported by the
+ * Linux kernel. The lock must be dropped and reacquired as a writer.
+ */
+#define rw_tryupgrade(rwp) RW_WRITE_HELD(rwp)
+
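With this change rw_tryupgrade() no longer attempts an in-place promotion; it simply reports whether the lock is already write-held. A caller that needs write access after reading therefore has to drop the read lock, reacquire it as a writer, and revalidate any state that may have changed in the window. A minimal sketch of that pattern, using only the SPL API declared in this header (the function name and the revalidation step are illustrative, not part of the patch):

    static void
    modify_under_lock(krwlock_t *lock)
    {
            rw_enter(lock, RW_READER);

            /* ... read-only inspection ... */

            if (!rw_tryupgrade(lock)) {
                    /*
                     * Not already write-held: drop the read lock and
                     * reacquire as a writer, then revalidate, since
                     * another thread may have run in between.
                     */
                    rw_exit(lock);
                    rw_enter(lock, RW_WRITER);
            }

            /* ... modify state under the write lock ... */
            rw_exit(lock);
    }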
#define rw_tryenter(rwp, rw) \
({ \
int _rc_ = 0; \
spl_rw_lockdep_on_maybe(rwp); \
})
-#define rw_tryupgrade(rwp) \
-({ \
- int _rc_ = 0; \
- \
- if (RW_WRITE_HELD(rwp)) { \
- _rc_ = 1; \
- } else { \
- spl_rw_lockdep_off_maybe(rwp); \
- if ((_rc_ = rwsem_tryupgrade(SEM(rwp)))) \
- spl_rw_set_owner(rwp); \
- spl_rw_lockdep_on_maybe(rwp); \
- } \
- _rc_; \
-})
/* END CSTYLED */
int spl_rw_init(void);
void spl_rw_fini(void);
-int rwsem_tryupgrade(struct rw_semaphore *rwsem);
#endif /* _SPL_RWLOCK_H */
* Solaris Porting Layer (SPL) Reader/Writer Lock Implementation.
*/
-#include <sys/rwlock.h>
-#include <linux/module.h>
-
-#if defined(CONFIG_PREEMPT_RT_FULL)
-
-#include <linux/rtmutex.h>
-#define RT_MUTEX_OWNER_MASKALL 1UL
-
-static int
-__rwsem_tryupgrade(struct rw_semaphore *rwsem)
-{
-#if defined(READER_BIAS) && defined(WRITER_BIAS)
- /*
- * After the 4.9.20-rt16 kernel, the realtime patch series lifted the
- * single reader restriction. While this could be accommodated by
- * adding additional compatibility code, assume the rwsem can never
- * be upgraded. All callers must already cleanly handle this case.
- */
- return (0);
-#else
- ASSERT((struct task_struct *)
- ((unsigned long)rwsem->lock.owner & ~RT_MUTEX_OWNER_MASKALL) ==
- current);
-
- /*
- * Prior to the 4.9.20-rt16 realtime patch series, rwsem is
- * implemented as a single mutex held by readers and writers alike.
- * However, this implementation would prevent a thread from taking
- * a read lock twice, as the mutex would already be locked on
- * the second attempt. Therefore the implementation allows a
- * single thread to take a rwsem as a read lock multiple times,
- * tracking that nesting in the read_depth counter.
- */
- if (rwsem->read_depth <= 1) {
- /*
- * In case, the current thread has not taken the lock
- * more than once as read lock, we can allow an
- * upgrade to a write lock. rwsem_rt.h implements
- * write locks as read_depth == 0.
- */
- rwsem->read_depth = 0;
- return (1);
- }
- return (0);
-#endif
-}
-#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-static int
-__rwsem_tryupgrade(struct rw_semaphore *rwsem)
-{
- int ret = 0;
- unsigned long flags;
- spl_rwsem_lock_irqsave(&rwsem->wait_lock, flags);
- if (RWSEM_COUNT(rwsem) == SPL_RWSEM_SINGLE_READER_VALUE &&
- list_empty(&rwsem->wait_list)) {
- ret = 1;
- RWSEM_COUNT(rwsem) = SPL_RWSEM_SINGLE_WRITER_VALUE;
- }
- spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags);
- return (ret);
-}
-#elif defined(RWSEM_ACTIVE_MASK)
-#if defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
-static int
-__rwsem_tryupgrade(struct rw_semaphore *rwsem)
-{
- long val;
- val = atomic_long_cmpxchg(&rwsem->count, SPL_RWSEM_SINGLE_READER_VALUE,
- SPL_RWSEM_SINGLE_WRITER_VALUE);
- return (val == SPL_RWSEM_SINGLE_READER_VALUE);
-}
-#else
-static int
-__rwsem_tryupgrade(struct rw_semaphore *rwsem)
-{
- typeof(rwsem->count) val;
- val = cmpxchg(&rwsem->count, SPL_RWSEM_SINGLE_READER_VALUE,
- SPL_RWSEM_SINGLE_WRITER_VALUE);
- return (val == SPL_RWSEM_SINGLE_READER_VALUE);
-}
-#endif
-#else
-static int
-__rwsem_tryupgrade(struct rw_semaphore *rwsem)
-{
- return (0);
-}
-#endif
-
-int
-rwsem_tryupgrade(struct rw_semaphore *rwsem)
-{
- if (__rwsem_tryupgrade(rwsem)) {
- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
- return (1);
- }
- return (0);
-}
-EXPORT_SYMBOL(rwsem_tryupgrade);
-
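The removed fast paths above all hinge on one idea: an upgrade is only safe when the caller is provably the sole active reader, in which case the semaphore count can be flipped from the single-reader value to the single-writer value with one compare-and-exchange. A stand-alone user-space C sketch of that idea (the constants and names are illustrative placeholders, not the kernel's RWSEM_ACTIVE_*_BIAS values, and it omits the waiter check the generic-spinlock variant performed):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative bias values, not the kernel's. */
    #define SINGLE_READER_VALUE	(1L)
    #define SINGLE_WRITER_VALUE	(-1L)

    /* Succeed only if the count still shows exactly one active reader. */
    static int
    try_upgrade(_Atomic long *count)
    {
            long expected = SINGLE_READER_VALUE;

            return (atomic_compare_exchange_strong(count, &expected,
                SINGLE_WRITER_VALUE));
    }

    int
    main(void)
    {
            _Atomic long count = SINGLE_READER_VALUE; /* one reader holds it */

            printf("first try:  %d\n", try_upgrade(&count)); /* 1: upgraded */
            printf("second try: %d\n", try_upgrade(&count)); /* 0: count changed */
            return (0);
    }

If any other thread holds or is waiting for the lock, the count no longer equals the single-reader value, so the exchange, and with it the upgrade, fails.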
int spl_rw_init(void) { return 0; }
void spl_rw_fini(void) { }