/*
 * include/sys/mutex.h — SPL (Solaris Porting Layer) mutex shim, from the
 * zfs tree; commit: "Clear owner after dropping mutex".
 */
1 /*****************************************************************************\
2  *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3  *  Copyright (C) 2007 The Regents of the University of California.
4  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5  *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6  *  UCRL-CODE-235197
7  *
8  *  This file is part of the SPL, Solaris Porting Layer.
9  *  For details, see <http://github.com/behlendorf/spl/>.
10  *
11  *  The SPL is free software; you can redistribute it and/or modify it
12  *  under the terms of the GNU General Public License as published by the
13  *  Free Software Foundation; either version 2 of the License, or (at your
14  *  option) any later version.
15  *
16  *  The SPL is distributed in the hope that it will be useful, but WITHOUT
17  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19  *  for more details.
20  *
21  *  You should have received a copy of the GNU General Public License along
22  *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
23 \*****************************************************************************/
24
25 #ifndef _SPL_MUTEX_H
26 #define _SPL_MUTEX_H
27
28 #include <sys/types.h>
29 #include <linux/mutex.h>
30 #include <linux/compiler_compat.h>
31
/*
 * Solaris mutex types.  Only MUTEX_DEFAULT is honored by this port;
 * mutex_init() asserts that no other type is requested.
 */
typedef enum {
        MUTEX_DEFAULT  = 0,
        MUTEX_SPIN     = 1,
        MUTEX_ADAPTIVE = 2
} kmutex_type_t;
37
38 #if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP)
39
/*
 * When the kernel's struct mutex tracks its owner (HAVE_MUTEX_OWNER on
 * SMP), the native mutex is used directly as the kmutex_t.
 */
typedef struct mutex kmutex_t;
41
42 static inline kthread_t *
43 mutex_owner(kmutex_t *mp)
44 {
45         struct thread_info *owner;
46
47         owner = ACCESS_ONCE(mp->owner);
48         if (owner)
49                 return owner->task;
50
51         return NULL;
52 }
53
54 static inline int
55 mutex_owned(kmutex_t *mp)
56 {
57         return (ACCESS_ONCE(mp->owner) == current_thread_info());
58 }
59
#define MUTEX_HELD(mp)          mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)      (!MUTEX_HELD(mp))

/*
 * Map Solaris mutex_init() onto the native Linux mutex.  Only
 * MUTEX_DEFAULT is supported.  A static lock_class_key per call site
 * lets lockdep distinguish initialization locations; the `name' and
 * `ibc' arguments are accepted for API compatibility but unused.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        static struct lock_class_key __key;                             \
        ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
        __mutex_init((mp), #mp, &__key);                                \
})
70
/*
 * Destruction is a no-op for a native mutex; just verify the lock is
 * not still held.
 */
#undef mutex_destroy
#define mutex_destroy(mp)                                               \
({                                                                      \
        VERIFY3P(mutex_owner(mp), ==, NULL);                            \
})

/* Lock operations map 1:1 onto the native primitives. */
#define mutex_tryenter(mp)              mutex_trylock(mp)
#define mutex_enter(mp)                 mutex_lock(mp)
79
/*
 * mutex->owner is not cleared when CONFIG_DEBUG_MUTEXES is set: the
 * debug implementation leaves a stale owner behind, which would make
 * mutex_owner()/mutex_owned() report a lock as held after release.
 * Clear it by hand after unlocking in that configuration.
 */
#ifdef CONFIG_DEBUG_MUTEXES
# define mutex_exit(mp)                                                 \
({                                                                      \
        mutex_unlock(mp);                                               \
        (mp)->owner = NULL;                                             \
})
#else
# define mutex_exit(mp)                 mutex_unlock(mp)
#endif /* CONFIG_DEBUG_MUTEXES */
90
/*
 * mutex_lock_nested() is a GPL-only symbol; when it is unavailable the
 * lockdep subclass `sc' is dropped and a plain mutex_enter() is used.
 */
#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)     mutex_lock_nested(mp, sc)
#else
# define mutex_enter_nested(mp, sc)     mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */
96
97 #else /* HAVE_MUTEX_OWNER */
98
/*
 * Fallback kmutex_t for kernels whose struct mutex does not expose an
 * owner: wrap the native mutex and track the owner ourselves, guarded
 * by the mutex's internal wait_lock.
 */
typedef struct {
        struct mutex m_mutex;   /* underlying native mutex */
        kthread_t *m_owner;     /* current holder, NULL when unlocked */
} kmutex_t;
103
/*
 * task_curr() (is the task running on a CPU right now?) drives the
 * adaptive spin in mutex_enter().  Without it, stub both it and the
 * spin limit to 0 so mutex_enter() degenerates to a plain sleep lock.
 */
#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner)       0
# define spl_mutex_spin_max()   0
#endif /* HAVE_TASK_CURR */

/* Access the embedded native mutex (m_mutex is the first member). */
#define MUTEX(mp)               ((struct mutex *)(mp))
112
113 static inline kthread_t *
114 spl_mutex_get_owner(kmutex_t *mp)
115 {
116         return mp->m_owner;
117 }
118
119 static inline void
120 spl_mutex_set_owner(kmutex_t *mp)
121 {
122         unsigned long flags;
123
124         spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
125         mp->m_owner = current;
126         spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
127 }
128
129 static inline void
130 spl_mutex_clear_owner(kmutex_t *mp)
131 {
132         unsigned long flags;
133
134         spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
135         mp->m_owner = NULL;
136         spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
137 }
138
139 static inline kthread_t *
140 mutex_owner(kmutex_t *mp)
141 {
142         unsigned long flags;
143         kthread_t *owner;
144
145         spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
146         owner = spl_mutex_get_owner(mp);
147         spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
148
149         return owner;
150 }
151
/* Held/ownership predicates built on the tracked owner field. */
#define mutex_owned(mp)         (mutex_owner(mp) == current)
#define MUTEX_HELD(mp)          mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)      (!MUTEX_HELD(mp))
155
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        static struct lock_class_key __key;                             \
        ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
        __mutex_init(MUTEX(mp), #mp, &__key);                           \
        spl_mutex_clear_owner(mp);                                      \
})

/*
 * No native teardown is required; just verify the mutex is unowned.
 */
#undef mutex_destroy
#define mutex_destroy(mp)                                               \
({                                                                      \
        VERIFY3P(mutex_owner(mp), ==, NULL);                            \
})
177
/*
 * Try-lock: on success (mutex_trylock() returns 1) record the caller
 * as owner.  Evaluates to 1 when the lock was taken, 0 otherwise.
 */
#define mutex_tryenter(mp)                                              \
({                                                                      \
        int _rc_;                                                       \
                                                                        \
        if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)                     \
                spl_mutex_set_owner(mp);                                \
                                                                        \
        _rc_;                                                           \
})
187
/*
 * Adaptive mutexs assume that the lock may be held by a task running
 * on a different cpu.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However it is also possible the task holding the lock yields the
 * processor with out dropping lock.  In this case, we know it's going
 * to be a while so we stop spinning and go to sleep waiting for the
 * lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 *
 * NOTE(review): the owner is sampled once before the spin loop; if
 * ownership changes mid-spin the loop condition uses a stale task.
 * This is benign — the fallback mutex_lock() below always acquires
 * correctly — but worth confirming against the upstream SPL history.
 */
#define mutex_enter(mp)                                                 \
({                                                                      \
        kthread_t *_owner_;                                             \
        int _rc_, _count_;                                              \
                                                                        \
        _rc_ = 0;                                                       \
        _count_ = 0;                                                    \
        _owner_ = mutex_owner(mp);                                      \
                                                                        \
        /* Spin only while the holder is actually on a CPU and we   */  \
        /* remain under the configured spin budget.                 */  \
        while (_owner_ && task_curr(_owner_) &&                         \
               _count_ <= spl_mutex_spin_max()) {                       \
                if ((_rc_ = mutex_trylock(MUTEX(mp))))                  \
                        break;                                          \
                                                                        \
                _count_++;                                              \
        }                                                               \
                                                                        \
        /* Spin failed or was skipped: block until the lock is ours. */ \
        if (!_rc_)                                                      \
                mutex_lock(MUTEX(mp));                                  \
                                                                        \
        spl_mutex_set_owner(mp);                                        \
})
221
/*
 * Release: clear the tracked owner before dropping the native mutex so
 * no thread can observe the lock as free while the owner field still
 * names the previous holder.
 */
#define mutex_exit(mp)                                                  \
({                                                                      \
        spl_mutex_clear_owner(mp);                                      \
        mutex_unlock(MUTEX(mp));                                        \
})
227
/*
 * Nested acquisition for lockdep.  mutex_lock_nested() is a GPL-only
 * symbol; when unavailable the subclass `sc' is dropped and a normal
 * mutex_enter() is used.
 *
 * Fix: the original expanded to mutex_lock_nested(MUTEX(mp, sc)) —
 * passing two arguments to the one-argument MUTEX() macro and none of
 * `sc' to mutex_lock_nested() — which cannot compile when
 * HAVE_GPL_ONLY_SYMBOLS is defined.  The subclass belongs as the
 * second argument of mutex_lock_nested().
 */
#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)                                     \
({                                                                      \
        mutex_lock_nested(MUTEX(mp), sc);                               \
        spl_mutex_set_owner(mp);                                        \
})
#else
# define mutex_enter_nested(mp, sc)                                     \
({                                                                      \
        mutex_enter(mp);                                                \
})
#endif
240
241 #endif /* HAVE_MUTEX_OWNER */
242
/* Module init/teardown hooks for the SPL mutex subsystem. */
int spl_mutex_init(void);
void spl_mutex_fini(void);
245
246 #endif /* _SPL_MUTEX_H */