1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 \*****************************************************************************/
28 #include <sys/types.h>
29 #include <linux/mutex.h>
30 #include <linux/compiler_compat.h>
#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP)

/*
 * Fast path: when the kernel's struct mutex already tracks its owner
 * (HAVE_MUTEX_OWNER, SMP builds), a kmutex_t is simply the native mutex
 * and no wrapper struct or external owner bookkeeping is needed.
 */
typedef struct mutex kmutex_t;
42 static inline kthread_t *
43 mutex_owner(kmutex_t *mp)
45 struct thread_info *owner;
47 owner = ACCESS_ONCE(mp->owner);
55 mutex_owned(kmutex_t *mp)
57 return (ACCESS_ONCE(mp->owner) == current_thread_info());
/* Solaris-style held checks, used by ASSERT()s throughout ZFS/SPL code. */
#define MUTEX_HELD(mp) mutex_owned(mp)
#define MUTEX_NOT_HELD(mp) (!MUTEX_HELD(mp))
/*
 * Initialize a mutex.  Must stay a macro so lockdep attributes the
 * lock-class key to the caller's site.  Only MUTEX_DEFAULT is supported;
 * name and ibc are accepted for Solaris API compatibility and ignored.
 *
 * NOTE(review): the original multi-statement body had no ({ }) wrapper
 * and its trailing backslash swallowed the following #define;
 * reconstructed with a GCC statement expression — confirm upstream.
 */
#define mutex_init(mp, name, type, ibc)					\
({									\
	static struct lock_class_key __key;				\
	ASSERT(type == MUTEX_DEFAULT);					\
									\
	__mutex_init((mp), #mp, &__key);				\
})
/*
 * Destroy a mutex.  It is a programming error to destroy a held mutex;
 * VERIFY3P() panics if an owner is still recorded.
 *
 * NOTE(review): the dangling backslash on the original last line merged
 * it into the next #define; wrapped in a statement expression to
 * terminate the macro cleanly.
 */
#define mutex_destroy(mp)						\
({									\
	VERIFY3P(mutex_owner(mp), ==, NULL);				\
})
/*
 * Map Solaris names directly onto the native primitives.  Like
 * mutex_tryenter(), mutex_trylock() returns 1 on success and 0 if the
 * lock is already held.
 */
#define mutex_tryenter(mp) mutex_trylock(mp)
#define mutex_enter(mp) mutex_lock(mp)
/*
 * mutex->owner is not cleared when CONFIG_DEBUG_MUTEXES is set, so it
 * must be cleared explicitly after the unlock or mutex_owner() /
 * mutex_destroy() would see a stale owner.
 *
 * NOTE(review): the debug-branch body and the #else directive were
 * truncated in this copy (the dangling backslash merged the two
 * conflicting #defines); reconstructed — confirm against upstream SPL.
 */
#ifdef CONFIG_DEBUG_MUTEXES
# define mutex_exit(mp)							\
({									\
	mutex_unlock(mp);						\
	(mp)->owner = NULL;						\
})
#else
# define mutex_exit(mp) mutex_unlock(mp)
#endif /* CONFIG_DEBUG_MUTEXES */
/*
 * Nested (lockdep-annotated) acquisition.  mutex_lock_nested() is a
 * GPL-only symbol; without it, fall back to a plain mutex_enter() and
 * lose the lockdep subclass annotation.
 *
 * Fix: the two #defines of the same macro appeared back-to-back with no
 * #else, which is a redefinition error; restored the #else.
 */
#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc) mutex_lock_nested(mp, sc)
#else
# define mutex_enter_nested(mp, sc) mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */
#else /* HAVE_MUTEX_OWNER */

/*
 * Slow path: the kernel mutex does not expose an owner, so kmutex_t is
 * a wrapper struct that embeds the native mutex and tracks the owner
 * itself.
 *
 * NOTE(review): only the m_mutex member is visible here — the enclosing
 * `typedef struct { ... } kmutex_t;` and the m_owner member (referenced
 * below as mp->m_owner) appear to be missing from this copy; restore
 * them from upstream.
 */
struct mutex m_mutex;
/*
 * spl_mutex_spin_max() bounds the adaptive spin in mutex_enter() below.
 * Without task_curr() the kernel cannot tell us whether the lock owner
 * is on-CPU, so spinning is disabled by forcing both to 0.
 */
#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner) 0
# define spl_mutex_spin_max() 0
#endif /* HAVE_TASK_CURR */
111 #define MUTEX(mp) ((struct mutex *)(mp))
113 static inline kthread_t *
114 spl_mutex_get_owner(kmutex_t *mp)
120 spl_mutex_set_owner(kmutex_t *mp)
124 spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
125 mp->m_owner = current;
126 spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
130 spl_mutex_clear_owner(kmutex_t *mp)
134 spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
136 spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
139 static inline kthread_t *
140 mutex_owner(kmutex_t *mp)
145 spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
146 owner = spl_mutex_get_owner(mp);
147 spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
/* A mutex is owned/held when the recorded owner is the current task. */
#define mutex_owned(mp) (mutex_owner(mp) == current)
#define MUTEX_HELD(mp) mutex_owned(mp)
#define MUTEX_NOT_HELD(mp) (!MUTEX_HELD(mp))
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built-in kernel lock analysis tools.
 */
/*
 * Initialize a mutex and clear its recorded owner.  Must stay a macro
 * so lockdep attributes the lock-class key to the caller's site.  Only
 * MUTEX_DEFAULT is supported; name and ibc are accepted for Solaris API
 * compatibility and ignored.
 *
 * NOTE(review): the original body had no ({ }) wrapper and its trailing
 * backslash swallowed the following #define; reconstructed with a GCC
 * statement expression — confirm upstream.
 */
#define mutex_init(mp, name, type, ibc)					\
({									\
	static struct lock_class_key __key;				\
	ASSERT(type == MUTEX_DEFAULT);					\
									\
	__mutex_init(MUTEX(mp), #mp, &__key);				\
	spl_mutex_clear_owner(mp);					\
})
/*
 * Destroy a mutex; panics (VERIFY3P) if an owner is still recorded.
 *
 * NOTE(review): the dangling backslash on the original last line merged
 * it into the next #define; wrapped in a statement expression to
 * terminate the macro cleanly.
 */
#define mutex_destroy(mp)						\
({									\
	VERIFY3P(mutex_owner(mp), ==, NULL);				\
})
/*
 * Try to take the mutex without blocking; record ownership on success.
 * Evaluates to 1 on success, 0 if the lock was already held (the
 * mutex_trylock() convention).
 *
 * Fix: _rc_ was used but never declared and the macro had no wrapper or
 * result expression in this copy; reconstructed as a statement
 * expression yielding _rc_ — confirm upstream.
 */
#define mutex_tryenter(mp)						\
({									\
	int _rc_;							\
									\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)			\
		spl_mutex_set_owner(mp);				\
									\
	_rc_;								\
})
/*
 * Adaptive mutexes assume that the lock may be held by a task running
 * on a different cpu.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However it is also possible the task holding the lock yields the
 * processor without dropping the lock.  In this case, we know it's
 * going to be a while so we stop spinning and go to sleep waiting for
 * the lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 */
/*
 * Adaptive acquire (see comment above): spin with trylock while the
 * owner is on-CPU and the spin budget (spl_mutex_spin_max()) is not
 * exhausted, otherwise sleep in mutex_lock().  Ownership is recorded on
 * exit in all cases.
 *
 * NOTE(review): _rc_/_count_ declarations, the loop break/increment,
 * the !_rc_ fallback guard, and the ({ }) wrapper were truncated in
 * this copy; reconstructed — confirm against upstream SPL.  Also note
 * _owner_ is sampled once before the loop and not refreshed inside it.
 */
#define mutex_enter(mp)							\
({									\
	kthread_t *_owner_;						\
	int _rc_, _count_;						\
									\
	_rc_ = 0;							\
	_count_ = 0;							\
	_owner_ = mutex_owner(mp);					\
									\
	while (_owner_ && task_curr(_owner_) &&				\
	       _count_ <= spl_mutex_spin_max()) {			\
		if ((_rc_ = mutex_trylock(MUTEX(mp))))			\
			break;						\
									\
		_count_++;						\
	}								\
									\
	if (!_rc_)							\
		mutex_lock(MUTEX(mp));					\
									\
	spl_mutex_set_owner(mp);					\
})
/*
 * Release the mutex.  The owner must be cleared BEFORE the unlock: once
 * mutex_unlock() runs another thread may acquire the lock and record
 * itself, and a late clear would stomp that record.
 *
 * NOTE(review): the ({ }) wrapper was truncated in this copy and the
 * dangling backslash merged the macro into the following lines;
 * reconstructed — confirm upstream.
 */
#define mutex_exit(mp)							\
({									\
	spl_mutex_clear_owner(mp);					\
	mutex_unlock(MUTEX(mp));					\
})
/*
 * Nested (lockdep-annotated) acquisition for the wrapper type.
 *
 * Fix: the original read `mutex_lock_nested(MUTEX(mp, sc))`, passing the
 * lockdep subclass `sc` into the single-argument MUTEX() cast macro
 * instead of as the second argument of mutex_lock_nested() — a compile
 * error.  Corrected to `mutex_lock_nested(MUTEX(mp), sc)`.  Also
 * restored the truncated #else fallback (mutex_lock_nested() is a
 * GPL-only symbol) and the closing #endif — confirm against upstream.
 */
#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)					\
({									\
	mutex_lock_nested(MUTEX(mp), sc);				\
	spl_mutex_set_owner(mp);					\
})
#else
# define mutex_enter_nested(mp, sc)					\
({									\
	mutex_enter(mp);						\
})
#endif /* HAVE_GPL_ONLY_SYMBOLS */
241 #endif /* HAVE_MUTEX_OWNER */
/*
 * Module-wide mutex subsystem setup/teardown entry points; presumably
 * implemented in the SPL mutex .c file — not visible from this header.
 */
int spl_mutex_init(void);
void spl_mutex_fini(void);
246 #endif /* _SPL_MUTEX_H */