int error;
int i;
- ASSERT(mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
od->od_object = 0;
int missing = 0;
int i;
- ASSERT(mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
if (missing) {
int error;
int i;
- ASSERT(mutex_held(&zd->zd_dirobj_lock));
+ ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
(void) zio_resume(spa);
}
-static void *
+static void
ztest_resume_thread(void *arg)
{
spa_t *spa = arg;
}
thread_exit();
-
- return (NULL);
}
#define GRACE 300
(double)functime / NANOSEC, zi->zi_funcname);
}
-static void *
+static void
ztest_thread(void *arg)
{
int rand;
}
thread_exit();
-
- return (NULL);
}
static void
static void
ztest_run(ztest_shared_t *zs)
{
- kt_did_t *tid;
spa_t *spa;
objset_t *os;
kthread_t *resume_thread;
+ kthread_t **run_threads;
uint64_t object;
int error;
int t, d;
/*
* Create a thread to periodically resume suspended I/O.
*/
- VERIFY3P((resume_thread = zk_thread_create(NULL, 0,
- (thread_func_t)ztest_resume_thread, spa, 0, NULL, TS_RUN, 0,
- PTHREAD_CREATE_JOINABLE)), !=, NULL);
+ resume_thread = thread_create(NULL, 0, ztest_resume_thread,
+ spa, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
#if 0
/*
}
zs->zs_enospc_count = 0;
- tid = umem_zalloc(ztest_opts.zo_threads * sizeof (kt_did_t),
+ run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
* Kick off all the tests that run in parallel.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
- kthread_t *thread;
-
- if (t < ztest_opts.zo_datasets &&
- ztest_dataset_open(t) != 0) {
- umem_free(tid,
- ztest_opts.zo_threads * sizeof (kt_did_t));
+ if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) {
+ umem_free(run_threads, ztest_opts.zo_threads *
+ sizeof (kthread_t *));
return;
}
- VERIFY3P(thread = zk_thread_create(NULL, 0,
- (thread_func_t)ztest_thread,
- (void *)(uintptr_t)t, 0, NULL, TS_RUN, 0,
- PTHREAD_CREATE_JOINABLE), !=, NULL);
- tid[t] = thread->t_tid;
+ run_threads[t] = thread_create(NULL, 0, ztest_thread,
+ (void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE,
+ defclsyspri);
}
/*
* so we don't close datasets while threads are still using them.
*/
for (t = ztest_opts.zo_threads - 1; t >= 0; t--) {
- thread_join(tid[t]);
+ VERIFY0(thread_join(run_threads[t]));
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
- umem_free(tid, ztest_opts.zo_threads * sizeof (kt_did_t));
+ umem_free(run_threads, ztest_opts.zo_threads * sizeof (kthread_t *));
/* Kill the resume thread */
ztest_exiting = B_TRUE;
- thread_join(resume_thread->t_tid);
+ VERIFY0(thread_join(resume_thread));
ztest_resume(spa);
/*
(unsigned long)i)
/*
- * Threads. TS_STACK_MIN is dictated by the minimum allowed pthread stack
- * size. While TS_STACK_MAX is somewhat arbitrary, it was selected to be
- * large enough for the expected stack depth while small enough to avoid
- * exhausting address space with high thread counts.
+ * Threads.
*/
-#define TS_MAGIC 0x72f158ab4261e538ull
-#define TS_RUN 0x00000002
-#define TS_STACK_MIN MAX(PTHREAD_STACK_MIN, 32768)
-#define TS_STACK_MAX (256 * 1024)
+typedef pthread_t kthread_t;
+
+#define TS_RUN 0x00000002
+#define TS_JOINABLE 0x00000004
+
+#define curthread ((void *)(uintptr_t)pthread_self())
+#define kpreempt(x) yield()
+#define getcomm() "unknown"
+
+#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
+ zk_thread_create(func, arg, stksize, state)
+#define thread_exit() pthread_exit(NULL)
+#define thread_join(t) pthread_join((pthread_t)(t), NULL)
+
+#define newproc(f, a, cid, pri, ctp, pid) (ENOSYS)
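/*
 * Minimal usage sketch (not taken from the patch; my_worker and
 * my_spawn_and_reap are hypothetical names): with the macros above,
 * kernel-style thread code maps directly onto pthreads, as the ztest.c
 * changes below do.
 */
static void
my_worker(void *arg)
{
	(void) arg;
	/* ... do work ... */
	thread_exit();			/* expands to pthread_exit(NULL) */
}

static void
my_spawn_and_reap(void)
{
	kthread_t *t;

	/* TS_JOINABLE requests a joinable pthread instead of a detached one */
	t = thread_create(NULL, 0, my_worker, NULL, 0, NULL,
	    TS_RUN | TS_JOINABLE, defclsyspri);
	VERIFY0(thread_join(t));	/* pthread_join((pthread_t)t, NULL) */
}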
/* in libzpool, p0 exists only to have its address taken */
typedef struct proc {
extern struct proc p0;
#define curproc (&p0)
-typedef void (*thread_func_t)(void *);
-typedef void (*thread_func_arg_t)(void *);
-typedef pthread_t kt_did_t;
-
-#define kpreempt(x) ((void)0)
-
-typedef struct kthread {
- kt_did_t t_tid;
- thread_func_t t_func;
- void * t_arg;
- pri_t t_pri;
-} kthread_t;
+#define PS_NONE -1
-#define curthread zk_thread_current()
-#define getcomm() "unknown"
-#define thread_exit zk_thread_exit
-#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
- zk_thread_create(stk, stksize, (thread_func_t)func, arg, \
- len, NULL, state, pri, PTHREAD_CREATE_DETACHED)
-#define thread_join(t) zk_thread_join(t)
-#define newproc(f, a, cid, pri, ctp, pid) (ENOSYS)
+extern kthread_t *zk_thread_create(void (*func)(void *), void *arg,
+ size_t stksize, int state);
-extern kthread_t *zk_thread_current(void);
-extern void zk_thread_exit(void);
-extern kthread_t *zk_thread_create(caddr_t stk, size_t stksize,
- thread_func_t func, void *arg, uint64_t len,
- proc_t *pp, int state, pri_t pri, int detachstate);
-extern void zk_thread_join(kt_did_t tid);
+#define issig(why) (FALSE)
+#define ISSIG(thr, why) (FALSE)
#define kpreempt_disable() ((void)0)
#define kpreempt_enable() ((void)0)
-#define PS_NONE -1
-
-#define issig(why) (FALSE)
-#define ISSIG(thr, why) (FALSE)
-
/*
* Mutexes
*/
-#define MTX_MAGIC 0x9522f51362a6e326ull
-#define MTX_INIT ((void *)NULL)
-#define MTX_DEST ((void *)-1UL)
-
typedef struct kmutex {
- void *m_owner;
- uint64_t m_magic;
- pthread_mutex_t m_lock;
+ pthread_mutex_t m_lock;
+ pthread_t m_owner;
} kmutex_t;
-#define MUTEX_DEFAULT 0
-#define MUTEX_NOLOCKDEP MUTEX_DEFAULT
-#define MUTEX_HELD(m) ((m)->m_owner == curthread)
-#define MUTEX_NOT_HELD(m) (!MUTEX_HELD(m))
+#define MUTEX_DEFAULT 0
+#define MUTEX_NOLOCKDEP MUTEX_DEFAULT
+#define MUTEX_HELD(mp) pthread_equal((mp)->m_owner, pthread_self())
+#define MUTEX_NOT_HELD(mp) !MUTEX_HELD(mp)
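/*
 * pthread_t is an opaque type in POSIX, so ownership is compared with
 * pthread_equal() rather than ==.  Callers keep asserting ownership in the
 * usual kernel style, e.g. ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock)) in
 * ztest.c above.
 */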
extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
extern void mutex_destroy(kmutex_t *mp);
extern void mutex_enter(kmutex_t *mp);
extern void mutex_exit(kmutex_t *mp);
extern int mutex_tryenter(kmutex_t *mp);
-extern void *mutex_owner(kmutex_t *mp);
-extern int mutex_held(kmutex_t *mp);
/*
* RW locks
*/
-#define RW_MAGIC 0x4d31fb123648e78aull
-#define RW_INIT ((void *)NULL)
-#define RW_DEST ((void *)-1UL)
-
typedef struct krwlock {
- void *rw_owner;
- void *rw_wr_owner;
- uint64_t rw_magic;
pthread_rwlock_t rw_lock;
+ pthread_t rw_owner;
uint_t rw_readers;
} krwlock_t;
typedef int krw_t;
-#define RW_READER 0
-#define RW_WRITER 1
-#define RW_DEFAULT RW_READER
-#define RW_NOLOCKDEP RW_READER
+#define RW_READER 0
+#define RW_WRITER 1
+#define RW_DEFAULT RW_READER
+#define RW_NOLOCKDEP RW_READER
-#define RW_READ_HELD(x) ((x)->rw_readers > 0)
-#define RW_WRITE_HELD(x) ((x)->rw_wr_owner == curthread)
-#define RW_LOCK_HELD(x) (RW_READ_HELD(x) || RW_WRITE_HELD(x))
-
+#define RW_READ_HELD(rw) ((rw)->rw_readers > 0)
+#define RW_WRITE_HELD(rw) pthread_equal((rw)->rw_owner, pthread_self())
+#define RW_LOCK_HELD(rw) (RW_READ_HELD(rw) || RW_WRITE_HELD(rw))
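/*
 * POSIX rwlocks do not expose their holders, so write ownership is recorded
 * in rw_owner and read ownership is approximated by the rw_readers count
 * maintained atomically in rw_enter()/rw_exit().
 */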
extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
extern void rw_destroy(krwlock_t *rwlp);
extern void rw_exit(krwlock_t *rwlp);
#define rw_downgrade(rwlp) do { } while (0)
+/*
+ * Credentials
+ */
extern uid_t crgetuid(cred_t *cr);
extern uid_t crgetruid(cred_t *cr);
extern gid_t crgetgid(cred_t *cr);
/*
* Condition variables
*/
-#define CV_MAGIC 0xd31ea9a83b1b30c4ull
-
-typedef struct kcondvar {
- uint64_t cv_magic;
- pthread_cond_t cv;
-} kcondvar_t;
+typedef pthread_cond_t kcondvar_t;
-#define CV_DEFAULT 0
+#define CV_DEFAULT 0
#define CALLOUT_FLAG_ABSOLUTE 0x2
extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
hrtime_t res, int flag);
extern void cv_signal(kcondvar_t *cv);
extern void cv_broadcast(kcondvar_t *cv);
+
#define cv_timedwait_sig(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_wait_sig(cv, mp) cv_wait(cv, mp)
#define cv_wait_io(cv, mp) cv_wait(cv, mp)
* =========================================================================
* threads
* =========================================================================
+ *
+ * TS_STACK_MIN is dictated by the minimum allowed pthread stack size. While
+ * TS_STACK_MAX is somewhat arbitrary, it was selected to be large enough for
+ * the expected stack depth while small enough to avoid exhausting address
+ * space with high thread counts.
*/
+#define TS_STACK_MIN MAX(PTHREAD_STACK_MIN, 32768)
+#define TS_STACK_MAX (256 * 1024)
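/*
 * Worked example (illustrative, assuming 4 KiB pages and
 * PTHREAD_STACK_MIN <= 32 KiB): a caller-requested stack of 8192 bytes is
 * first raised to TS_STACK_MIN (32768) by the MAX() in zk_thread_create()
 * below, and P2ROUNDUP(32768, PAGESIZE) leaves it unchanged since it is
 * already page aligned.
 */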
-pthread_cond_t kthread_cond = PTHREAD_COND_INITIALIZER;
-pthread_mutex_t kthread_lock = PTHREAD_MUTEX_INITIALIZER;
-pthread_key_t kthread_key;
-int kthread_nr = 0;
-
-static void
-thread_init(void)
-{
- kthread_t *kt;
-
- VERIFY3S(pthread_key_create(&kthread_key, NULL), ==, 0);
-
- /* Create entry for primary kthread */
- kt = umem_zalloc(sizeof (kthread_t), UMEM_NOFAIL);
- kt->t_tid = pthread_self();
- kt->t_func = NULL;
-
- VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
-
- /* Only the main thread should be running at the moment */
- ASSERT3S(kthread_nr, ==, 0);
- kthread_nr = 1;
-}
-
-static void
-thread_fini(void)
-{
- kthread_t *kt = curthread;
-
- ASSERT(pthread_equal(kt->t_tid, pthread_self()));
- ASSERT3P(kt->t_func, ==, NULL);
-
- umem_free(kt, sizeof (kthread_t));
-
- /* Wait for all threads to exit via thread_exit() */
- VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
-
- kthread_nr--; /* Main thread is exiting */
-
- while (kthread_nr > 0)
- VERIFY0(pthread_cond_wait(&kthread_cond, &kthread_lock));
-
- ASSERT3S(kthread_nr, ==, 0);
- VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
-
- VERIFY3S(pthread_key_delete(kthread_key), ==, 0);
-}
-
-kthread_t *
-zk_thread_current(void)
-{
- kthread_t *kt = pthread_getspecific(kthread_key);
-
- ASSERT3P(kt, !=, NULL);
-
- return (kt);
-}
-
-void *
-zk_thread_helper(void *arg)
-{
- kthread_t *kt = (kthread_t *)arg;
-
- VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
-
- VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
- kthread_nr++;
- VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
- (void) setpriority(PRIO_PROCESS, 0, kt->t_pri);
-
- kt->t_tid = pthread_self();
- ((thread_func_arg_t)kt->t_func)(kt->t_arg);
-
- /* Unreachable, thread must exit with thread_exit() */
- abort();
-
- return (NULL);
-}
-
+/*ARGSUSED*/
kthread_t *
-zk_thread_create(caddr_t stk, size_t stksize, thread_func_t func, void *arg,
- uint64_t len, proc_t *pp, int state, pri_t pri, int detachstate)
+zk_thread_create(void (*func)(void *), void *arg, size_t stksize, int state)
{
- kthread_t *kt;
pthread_attr_t attr;
+ pthread_t tid;
char *stkstr;
+ int detachstate = PTHREAD_CREATE_DETACHED;
- ASSERT0(state & ~TS_RUN);
- ASSERT0(len);
+ VERIFY0(pthread_attr_init(&attr));
- kt = umem_zalloc(sizeof (kthread_t), UMEM_NOFAIL);
- kt->t_func = func;
- kt->t_arg = arg;
- kt->t_pri = pri;
+ if (state & TS_JOINABLE)
+ detachstate = PTHREAD_CREATE_JOINABLE;
- VERIFY0(pthread_attr_init(&attr));
VERIFY0(pthread_attr_setdetachstate(&attr, detachstate));
/*
VERIFY3S(stksize, >, 0);
stksize = P2ROUNDUP(MAX(stksize, TS_STACK_MIN), PAGESIZE);
+
/*
* If this ever fails, it may be because the stack size is not a
* multiple of system page size.
VERIFY0(pthread_attr_setstacksize(&attr, stksize));
VERIFY0(pthread_attr_setguardsize(&attr, PAGESIZE));
- VERIFY0(pthread_create(&kt->t_tid, &attr, &zk_thread_helper, kt));
+ VERIFY0(pthread_create(&tid, &attr, (void *(*)(void *))func, arg));
VERIFY0(pthread_attr_destroy(&attr));
- return (kt);
-}
-
-void
-zk_thread_exit(void)
-{
- kthread_t *kt = curthread;
-
- ASSERT(pthread_equal(kt->t_tid, pthread_self()));
-
- umem_free(kt, sizeof (kthread_t));
-
- VERIFY0(pthread_mutex_lock(&kthread_lock));
- kthread_nr--;
- VERIFY0(pthread_mutex_unlock(&kthread_lock));
-
- VERIFY0(pthread_cond_broadcast(&kthread_cond));
- pthread_exit((void *)TS_MAGIC);
-}
-
-void
-zk_thread_join(kt_did_t tid)
-{
- void *ret;
-
- pthread_join((pthread_t)tid, &ret);
- VERIFY3P(ret, ==, (void *)TS_MAGIC);
+ return ((void *)(uintptr_t)tid);
}
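/*
 * The returned kthread_t * is simply the pthread_t encoded as a pointer
 * value; thread_join() casts it back to a pthread_t before calling
 * pthread_join(), so no per-thread state is allocated or freed.
 */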
/*
void
mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
{
- ASSERT3S(type, ==, MUTEX_DEFAULT);
- ASSERT3P(cookie, ==, NULL);
- mp->m_owner = MTX_INIT;
- mp->m_magic = MTX_MAGIC;
- VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
+ VERIFY0(pthread_mutex_init(&mp->m_lock, NULL));
+ memset(&mp->m_owner, 0, sizeof (pthread_t));
}
void
mutex_destroy(kmutex_t *mp)
{
- ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
- ASSERT3P(mp->m_owner, ==, MTX_INIT);
- ASSERT0(pthread_mutex_destroy(&(mp)->m_lock));
- mp->m_owner = MTX_DEST;
- mp->m_magic = 0;
+ VERIFY0(pthread_mutex_destroy(&mp->m_lock));
}
void
mutex_enter(kmutex_t *mp)
{
- ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
- ASSERT3P(mp->m_owner, !=, MTX_DEST);
- ASSERT3P(mp->m_owner, !=, curthread);
- VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
- ASSERT3P(mp->m_owner, ==, MTX_INIT);
- mp->m_owner = curthread;
+ VERIFY0(pthread_mutex_lock(&mp->m_lock));
+ mp->m_owner = pthread_self();
}
int
mutex_tryenter(kmutex_t *mp)
{
- int err;
- ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
- ASSERT3P(mp->m_owner, !=, MTX_DEST);
- if (0 == (err = pthread_mutex_trylock(&mp->m_lock))) {
- ASSERT3P(mp->m_owner, ==, MTX_INIT);
- mp->m_owner = curthread;
+ int error;
+
+ error = pthread_mutex_trylock(&mp->m_lock);
+ if (error == 0) {
+ mp->m_owner = pthread_self();
return (1);
} else {
- VERIFY3S(err, ==, EBUSY);
+ VERIFY3S(error, ==, EBUSY);
return (0);
}
}
void
mutex_exit(kmutex_t *mp)
{
- ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
- ASSERT3P(mutex_owner(mp), ==, curthread);
- mp->m_owner = MTX_INIT;
- VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
-}
-
-void *
-mutex_owner(kmutex_t *mp)
-{
- ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
- return (mp->m_owner);
-}
-
-int
-mutex_held(kmutex_t *mp)
-{
- return (mp->m_owner == curthread);
+ memset(&mp->m_owner, 0, sizeof (pthread_t));
+ VERIFY0(pthread_mutex_unlock(&mp->m_lock));
}
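/*
 * m_owner is cleared before pthread_mutex_unlock(); clearing it afterwards
 * could clobber the owner recorded by the next thread to win the lock.
 */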
/*
void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
- ASSERT3S(type, ==, RW_DEFAULT);
- ASSERT3P(arg, ==, NULL);
- VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
- rwlp->rw_owner = RW_INIT;
- rwlp->rw_wr_owner = RW_INIT;
+ VERIFY0(pthread_rwlock_init(&rwlp->rw_lock, NULL));
rwlp->rw_readers = 0;
- rwlp->rw_magic = RW_MAGIC;
+ rwlp->rw_owner = 0;
}
void
rw_destroy(krwlock_t *rwlp)
{
- ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
- ASSERT(rwlp->rw_readers == 0 && rwlp->rw_wr_owner == RW_INIT);
- VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
- rwlp->rw_magic = 0;
+ VERIFY0(pthread_rwlock_destroy(&rwlp->rw_lock));
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
- ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
- ASSERT3P(rwlp->rw_owner, !=, curthread);
- ASSERT3P(rwlp->rw_wr_owner, !=, curthread);
-
if (rw == RW_READER) {
- VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
- ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
-
+ VERIFY0(pthread_rwlock_rdlock(&rwlp->rw_lock));
atomic_inc_uint(&rwlp->rw_readers);
} else {
- VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
- ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
- ASSERT3U(rwlp->rw_readers, ==, 0);
-
- rwlp->rw_wr_owner = curthread;
+ VERIFY0(pthread_rwlock_wrlock(&rwlp->rw_lock));
+ rwlp->rw_owner = pthread_self();
}
-
- rwlp->rw_owner = curthread;
}
void
rw_exit(krwlock_t *rwlp)
{
- ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
- ASSERT(RW_LOCK_HELD(rwlp));
-
if (RW_READ_HELD(rwlp))
atomic_dec_uint(&rwlp->rw_readers);
else
- rwlp->rw_wr_owner = RW_INIT;
+ rwlp->rw_owner = 0;
- rwlp->rw_owner = RW_INIT;
- VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
+ VERIFY0(pthread_rwlock_unlock(&rwlp->rw_lock));
}
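/*
 * pthread_rwlock_unlock() serves both readers and writers, so rw_exit()
 * uses RW_READ_HELD() to decide which bookkeeping to undo: the atomic
 * reader count or the rw_owner field.
 */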
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
- int rv;
-
- ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
+ int error;
if (rw == RW_READER)
- rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
+ error = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
else
- rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
-
- if (rv == 0) {
- ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
+ error = pthread_rwlock_trywrlock(&rwlp->rw_lock);
+ if (error == 0) {
if (rw == RW_READER)
atomic_inc_uint(&rwlp->rw_readers);
- else {
- ASSERT3U(rwlp->rw_readers, ==, 0);
- rwlp->rw_wr_owner = curthread;
- }
+ else
+ rwlp->rw_owner = pthread_self();
- rwlp->rw_owner = curthread;
return (1);
}
- VERIFY3S(rv, ==, EBUSY);
+ VERIFY3S(error, ==, EBUSY);
return (0);
}
int
rw_tryupgrade(krwlock_t *rwlp)
{
- ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
-
return (0);
}
void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
- ASSERT3S(type, ==, CV_DEFAULT);
- cv->cv_magic = CV_MAGIC;
- VERIFY0(pthread_cond_init(&cv->cv, NULL));
+ VERIFY0(pthread_cond_init(cv, NULL));
}
void
cv_destroy(kcondvar_t *cv)
{
- ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
- VERIFY0(pthread_cond_destroy(&cv->cv));
- cv->cv_magic = 0;
+ VERIFY0(pthread_cond_destroy(cv));
}
void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
- ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
- ASSERT3P(mutex_owner(mp), ==, curthread);
- mp->m_owner = MTX_INIT;
- VERIFY0(pthread_cond_wait(&cv->cv, &mp->m_lock));
- mp->m_owner = curthread;
+ memset(&mp->m_owner, 0, sizeof (pthread_t));
+ VERIFY0(pthread_cond_wait(cv, &mp->m_lock));
+ mp->m_owner = pthread_self();
}
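/*
 * pthread_cond_wait() releases m_lock while blocked and re-acquires it
 * before returning, so the m_owner bookkeeping is cleared before the wait
 * and restored afterwards.
 */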
clock_t
timestruc_t ts;
clock_t delta;
- ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
-
delta = abstime - ddi_get_lbolt();
if (delta <= 0)
return (-1);
ts.tv_nsec -= NANOSEC;
}
- ASSERT3P(mutex_owner(mp), ==, curthread);
- mp->m_owner = MTX_INIT;
- error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
- mp->m_owner = curthread;
+ memset(&mp->m_owner, 0, sizeof (pthread_t));
+ error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
+ mp->m_owner = pthread_self();
if (error == ETIMEDOUT)
return (-1);
if (delta <= 0)
return (-1);
- VERIFY(gettimeofday(&tv, NULL) == 0);
+ VERIFY0(gettimeofday(&tv, NULL));
ts.tv_sec = tv.tv_sec + delta / NANOSEC;
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % NANOSEC);
ts.tv_nsec -= NANOSEC;
}
- ASSERT(mutex_owner(mp) == curthread);
- mp->m_owner = MTX_INIT;
- error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
- mp->m_owner = curthread;
+ memset(&mp->m_owner, 0, sizeof (pthread_t));
+ error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
+ mp->m_owner = pthread_self();
if (error == ETIMEDOUT)
return (-1);
void
cv_signal(kcondvar_t *cv)
{
- ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
- VERIFY0(pthread_cond_signal(&cv->cv));
+ VERIFY0(pthread_cond_signal(cv));
}
void
cv_broadcast(kcondvar_t *cv)
{
- ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
- VERIFY0(pthread_cond_broadcast(&cv->cv));
+ VERIFY0(pthread_cond_broadcast(cv));
}
/*
VERIFY0(uname(&hw_utsname));
- thread_init();
system_taskq_init();
icp_init();
icp_fini();
system_taskq_fini();
- thread_fini();
random_fini();
}