#include <sys/types.h>
#include <linux/mutex.h>
#include <linux/compiler_compat.h>
+#include <linux/lockdep.h>
typedef enum {
MUTEX_DEFAULT = 0,
MUTEX_SPIN = 1,
- MUTEX_ADAPTIVE = 2
+ MUTEX_ADAPTIVE = 2,
+ MUTEX_NOLOCKDEP = 3
} kmutex_type_t;
typedef struct {
struct mutex m_mutex;
spinlock_t m_lock; /* used for serializing mutex_exit */
kthread_t *m_owner;
+#ifdef CONFIG_LOCKDEP
+ kmutex_type_t m_type;
+#endif /* CONFIG_LOCKDEP */
} kmutex_t;
#define MUTEX(mp) (&((mp)->m_mutex))
#define MUTEX_HELD(mp) mutex_owned(mp)
#define MUTEX_NOT_HELD(mp) (!MUTEX_HELD(mp))
+#ifdef CONFIG_LOCKDEP
+static inline void
+spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
+{
+ mp->m_type = type;
+}
+static inline void
+spl_mutex_lockdep_off_maybe(kmutex_t *mp)
+{
+	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
+		lockdep_off();
+}
+static inline void
+spl_mutex_lockdep_on_maybe(kmutex_t *mp)
+{
+	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
+		lockdep_on();
+}
+#else /* CONFIG_LOCKDEP */
+#define spl_mutex_set_type(mp, type)
+#define spl_mutex_lockdep_off_maybe(mp)
+#define spl_mutex_lockdep_on_maybe(mp)
+#endif /* CONFIG_LOCKDEP */
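/*
 * Note: lockdep_off()/lockdep_on() act on the current task only, so the
 * off/on pairs used below suppress checking for a single acquisition or
 * release, not for the lock globally.
 */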
+
/*
* The following functions must be a #define and not static inline.
* This ensures that the native linux mutex functions (lock/unlock)
* will be correctly attributed to the caller's code, which matters to
* the kernel's built-in lock analysis tools.
*/
#define mutex_init(mp, name, type, ibc) \
{ \
static struct lock_class_key __key; \
- ASSERT(type == MUTEX_DEFAULT); \
+ ASSERT(type == MUTEX_DEFAULT || type == MUTEX_NOLOCKDEP); \
\
__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
spin_lock_init(&(mp)->m_lock); \
spl_mutex_clear_owner(mp); \
+ spl_mutex_set_type(mp, type); \
}
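/*
 * Illustrative use (hypothetical caller, not part of this change): a
 * mutex whose ordering is validated by some higher-level checker can now
 * opt out of lockdep entirely:
 *
 *	kmutex_t zl_lock;
 *
 *	mutex_init(&zl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
 *	mutex_enter(&zl_lock);
 *	...
 *	mutex_exit(&zl_lock);
 *	mutex_destroy(&zl_lock);
 */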
#undef mutex_destroy
#define mutex_tryenter(mp) \
({ \
int _rc_; \
\
+ spl_mutex_lockdep_off_maybe(mp); \
if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1) \
spl_mutex_set_owner(mp); \
+ spl_mutex_lockdep_on_maybe(mp); \
\
_rc_; \
})
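/*
 * Illustrative use (hypothetical caller): mutex_tryenter() returns 1 on
 * success and 0 when the mutex is already held, so it suits contexts
 * that must not block:
 *
 *	if (mutex_tryenter(&obj->lock)) {
 *		... fast path ...
 *		mutex_exit(&obj->lock);
 *	}
 */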
#define mutex_enter_nested(mp, subclass) \
{ \
ASSERT3P(mutex_owner(mp), !=, current); \
+ spl_mutex_lockdep_off_maybe(mp); \
mutex_lock_nested(MUTEX(mp), (subclass)); \
+ spl_mutex_lockdep_on_maybe(mp); \
spl_mutex_set_owner(mp); \
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
#define mutex_enter_nested(mp, subclass) \
{ \
ASSERT3P(mutex_owner(mp), !=, current); \
+ spl_mutex_lockdep_off_maybe(mp); \
mutex_lock(MUTEX(mp)); \
+ spl_mutex_lockdep_on_maybe(mp); \
spl_mutex_set_owner(mp); \
}
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
/*
 * The spinlock taken below serializes mutex_exit() so that an object
 * embedding the kmutex_t can be safely freed by another thread as soon
 * as it can acquire the mutex (see the m_lock comment in kmutex_t).
 */
#define mutex_exit(mp) \
{ \
+ spl_mutex_lockdep_off_maybe(mp); \
spin_lock(&(mp)->m_lock); \
spl_mutex_clear_owner(mp); \
mutex_unlock(MUTEX(mp)); \
spin_unlock(&(mp)->m_lock); \
+ spl_mutex_lockdep_on_maybe(mp); \
}
int spl_mutex_init(void);
typedef enum {
RW_DRIVER = 2,
- RW_DEFAULT = 4
+ RW_DEFAULT = 4,
+ RW_NOLOCKDEP = 5
} krw_type_t;
typedef struct {
#ifndef CONFIG_RWSEM_SPIN_ON_OWNER
kthread_t *rw_owner;
#endif
+#ifdef CONFIG_LOCKDEP
+ krw_type_t rw_type;
+#endif /* CONFIG_LOCKDEP */
} krwlock_t;
#define SEM(rwp) (&(rwp)->rw_rwlock)
#endif
}
+#ifdef CONFIG_LOCKDEP
+static inline void
+spl_rw_set_type(krwlock_t *rwp, krw_type_t type)
+{
+ rwp->rw_type = type;
+}
+static inline void
+spl_rw_lockdep_off_maybe(krwlock_t *rwp)
+{
+	if (rwp && rwp->rw_type == RW_NOLOCKDEP)
+		lockdep_off();
+}
+static inline void
+spl_rw_lockdep_on_maybe(krwlock_t *rwp)
+{
+	if (rwp && rwp->rw_type == RW_NOLOCKDEP)
+		lockdep_on();
+}
+#else /* CONFIG_LOCKDEP */
+#define spl_rw_set_type(rwp, type)
+#define spl_rw_lockdep_off_maybe(rwp)
+#define spl_rw_lockdep_on_maybe(rwp)
+#endif /* CONFIG_LOCKDEP */
+
static inline int
RW_READ_HELD(krwlock_t *rwp)
{
	return (RW_LOCK_HELD(rwp) && !RW_WRITE_HELD(rwp));
}
#define rw_init(rwp, name, type, arg) \
({ \
static struct lock_class_key __key; \
+ ASSERT(type == RW_DEFAULT || type == RW_NOLOCKDEP); \
\
__init_rwsem(SEM(rwp), #rwp, &__key); \
spl_rw_clear_owner(rwp); \
+ spl_rw_set_type(rwp, type); \
})
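/*
 * Illustrative use (hypothetical caller): as with mutexes, RW_NOLOCKDEP
 * marks an rwlock whose ordering lockdep should not validate:
 *
 *	krwlock_t zt_rwlock;
 *
 *	rw_init(&zt_rwlock, NULL, RW_NOLOCKDEP, NULL);
 */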
#define rw_destroy(rwp) \
({ \
	VERIFY(!RW_LOCK_HELD(rwp)); \
})

#define rw_tryenter(rwp, rw) \
({ \
int _rc_ = 0; \
\
+ spl_rw_lockdep_off_maybe(rwp); \
switch (rw) { \
case RW_READER: \
	_rc_ = down_read_trylock(SEM(rwp)); \
	break; \
case RW_WRITER: \
	if ((_rc_ = down_write_trylock(SEM(rwp)))) \
		spl_rw_set_owner(rwp); \
	break; \
default: \
	VERIFY(0); \
} \
+ spl_rw_lockdep_on_maybe(rwp); \
_rc_; \
})
#define rw_enter(rwp, rw) \
({ \
+ spl_rw_lockdep_off_maybe(rwp); \
switch (rw) { \
case RW_READER: \
	down_read(SEM(rwp)); \
	break; \
case RW_WRITER: \
	down_write(SEM(rwp)); \
	spl_rw_set_owner(rwp); \
	break; \
default: \
	VERIFY(0); \
} \
+ spl_rw_lockdep_on_maybe(rwp); \
})
#define rw_exit(rwp) \
({ \
+ spl_rw_lockdep_off_maybe(rwp); \
if (RW_WRITE_HELD(rwp)) { \
	spl_rw_clear_owner(rwp); \
	up_write(SEM(rwp)); \
} else { \
	ASSERT(RW_READ_HELD(rwp)); \
	up_read(SEM(rwp)); \
} \
+ spl_rw_lockdep_on_maybe(rwp); \
})
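/*
 * Illustrative use (hypothetical caller): a single rw_exit() serves both
 * lock modes by dispatching on RW_WRITE_HELD():
 *
 *	rw_enter(&zt_rwlock, RW_READER);
 *	... read-side critical section ...
 *	rw_exit(&zt_rwlock);
 */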
#define rw_downgrade(rwp) \
({ \
+ spl_rw_lockdep_off_maybe(rwp); \
spl_rw_clear_owner(rwp); \
downgrade_write(SEM(rwp)); \
+ spl_rw_lockdep_on_maybe(rwp); \
})
#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
unsigned long _flags_; \
int _rc_ = 0; \
\
+ spl_rw_lockdep_off_maybe(rwp); \
spl_rwsem_lock_irqsave(&SEM(rwp)->wait_lock, _flags_); \
if ((list_empty(&SEM(rwp)->wait_list)) && \
    (SEM(rwp)->activity == 1)) { \
	SEM(rwp)->activity = -1; \
	(rwp)->rw_owner = current; \
	_rc_ = 1; \
} \
spl_rwsem_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_); \
+ spl_rw_lockdep_on_maybe(rwp); \
_rc_; \
})
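/*
 * Illustrative use (hypothetical caller): rw_tryupgrade() fails when
 * other readers or waiters exist, so callers typically fall back to
 * dropping the read lock and reacquiring as a writer:
 *
 *	if (!rw_tryupgrade(&zt_rwlock)) {
 *		rw_exit(&zt_rwlock);
 *		rw_enter(&zt_rwlock, RW_WRITER);
 *	}
 */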
#else