#include <linux/rwsem.h>
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+#if defined(CONFIG_PREEMPT_RT_FULL)
+#define SPL_RWSEM_SINGLE_READER_VALUE (1)
+#define SPL_RWSEM_SINGLE_WRITER_VALUE (0)
+#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define SPL_RWSEM_SINGLE_READER_VALUE (1)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (-1)
#else
#endif
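/*
 * Sketch, not part of the patch: the #else branch above is elided.
 * Presumably it supplies the counter bias values used by the
 * non-spinlock rwsem implementation; RWSEM_ACTIVE_READ_BIAS and
 * RWSEM_ACTIVE_WRITE_BIAS are the upstream kernel constants.
 */
#define SPL_RWSEM_SINGLE_READER_VALUE (RWSEM_ACTIVE_READ_BIAS)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (RWSEM_ACTIVE_WRITE_BIAS)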
/* Linux 3.16 changed activity to count for rwsem-spinlock */
-#if defined(HAVE_RWSEM_ACTIVITY)
+#if defined(CONFIG_PREEMPT_RT_FULL)
+#define RWSEM_COUNT(sem) sem->read_depth
+#elif defined(HAVE_RWSEM_ACTIVITY)
#define RWSEM_COUNT(sem) sem->activity
/* Linux 4.8 changed count to an atomic_long_t for !rwsem-spinlock */
#elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
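/*
 * Sketch, not part of the patch: the tail of the RWSEM_COUNT()
 * ladder is elided above. On Linux 4.8+ the count is presumably
 * read atomically, with a plain field access as the fallback:
 */
#define RWSEM_COUNT(sem) atomic_long_read(&(sem)->count)
#else
#define RWSEM_COUNT(sem) (sem)->count
#endif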
#define DEBUG_SUBSYSTEM S_RWLOCK
-#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
+#if defined(CONFIG_PREEMPT_RT_FULL)
+
+#include <linux/rtmutex.h>
+
+static int
+__rwsem_tryupgrade(struct rw_semaphore *rwsem)
+{
+ ASSERT(rt_mutex_owner(&rwsem->lock) == current);
+
+ /*
+ * Under the realtime patch series, rwsem is implemented as a
+ * single mutex held by readers and writers alike. However,
+ * this would prevent a thread from taking a read lock twice:
+ * the mutex would already be locked on the second attempt.
+ * The implementation therefore allows a single thread to take
+ * an rwsem as a read lock multiple times, tracking that
+ * nesting in the read_depth counter.
+ */
+ if (rwsem->read_depth <= 1) {
+ /*
+ * If the current thread has not taken the lock more
+ * than once as a read lock, we can allow an upgrade
+ * to a write lock. rwsem_rt.h implements write locks
+ * as read_depth == 0.
+ */
+ rwsem->read_depth = 0;
+ return (1);
+ }
+ return (0);
+}
+#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
rwp->rw_type = 0;
}
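/*
 * Sketch, not part of the patch: the body of the generic-spinlock
 * __rwsem_tryupgrade() is elided between the hunks above. A
 * plausible shape, assuming the spl_rwsem_*lock_irq* wrappers map
 * onto the rwsem wait_lock spinlock and that RWSEM_COUNT() expands
 * to a plain lvalue in the spinlock configurations:
 */
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
	int ret = 0;
	unsigned long flags;

	spl_rwsem_lock_irqsave(&rwsem->wait_lock, flags);

	/* Upgrade only if we are the sole reader and nobody waits. */
	if ((RWSEM_COUNT(rwsem) == SPL_RWSEM_SINGLE_READER_VALUE) &&
	    list_empty(&rwsem->wait_list)) {
		RWSEM_COUNT(rwsem) = SPL_RWSEM_SINGLE_WRITER_VALUE;
		ret = 1;
	}

	spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags);

	return (ret);
}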
+#if defined(CONFIG_PREEMPT_RT_FULL)
+static int
+splat_rwlock_test1(struct file *file, void *arg)
+{
+ /*
+ * This test requires the lock to be held by multiple threads,
+ * which PREEMPT_RT_FULL kernels do not allow; it is therefore
+ * stubbed out to return success.
+ */
+ return 0;
+}
+#else
static int
splat_rwlock_wr_thr(void *arg)
{
return rc;
}
+#endif
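/*
 * Sketch, not part of the patch: how the read_depth rule from the
 * PREEMPT_RT_FULL __rwsem_tryupgrade() above plays out. Plain rwsem
 * API; the depth annotations assume the rwsem_rt.h fields described
 * in that function's comment.
 */
static void
rt_read_nesting_example(struct rw_semaphore *rwsem)
{
	down_read(rwsem);	/* read_depth: 0 -> 1 */
	down_read(rwsem);	/* same thread nests: 1 -> 2 */

	/* read_depth > 1: __rwsem_tryupgrade() would refuse here. */

	up_read(rwsem);		/* 2 -> 1 */

	/*
	 * Single outstanding read hold: an upgrade may now succeed,
	 * leaving read_depth == 0, which rwsem_rt.h treats as the
	 * write-held state.
	 */

	up_read(rwsem);		/* 1 -> 0, lock released */
}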
static void
splat_rwlock_test2_func(void *arg)
splat_init_rw_priv(rwp, file);
- /* Validate all combinations of rw_tryenter() contention */
+ /*
+ * Validate all combinations of rw_tryenter() contention.
+ *
+ * The concurrent reader case is modified for PREEMPT_RT_FULL
+ * kernels, which do not permit concurrent read locks to be taken
+ * by different threads. The same thread may, however, take the
+ * read lock multiple times.
+ */
rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
+#if defined(CONFIG_PREEMPT_RT_FULL)
+ rc4 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_READER);
+#else
rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER);
+#endif
rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER);
rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER);
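/*
 * Summary, not part of the patch: the expected rw_tryenter() results
 * exercised above (0 = try-lock succeeds, -EBUSY = contended). The
 * first type is presumably held by the current thread and the second
 * presumably tried from a separate taskq thread:
 *
 *	held		tried		!RT	PREEMPT_RT_FULL
 *	RW_WRITER	RW_WRITER	-EBUSY	-EBUSY
 *	RW_WRITER	RW_READER	-EBUSY	-EBUSY
 *	RW_READER	RW_WRITER	-EBUSY	-EBUSY
 *	RW_READER	RW_READER	0	-EBUSY
 *	RW_NONE		RW_WRITER	0	0
 *	RW_NONE		RW_READER	0	0
 */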