/*
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/thread_local_alloc.h"
                /* To determine type of tsd impl.       */
                /* Includes private/specific.h.         */
#if defined(USE_CUSTOM_SPECIFIC)

static const tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
                /* A thread-specific data entry which will never    */
                /* appear valid to a reader.  Used to fill in empty */
                /* cache entries to avoid a check for 0.            */
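
/* A minimal sketch (not compiled) of what the sentinel buys a reader:  */
/* an empty cache slot holds &invalid_tse rather than 0, so the slot    */
/* can be dereferenced unconditionally; its INVALID_QTID field can      */
/* never match a live qtid.  key, slot and qtid are hypothetical here:  */
#if 0
  tse *entry = key -> cache[slot];  /* never 0, possibly &invalid_tse */
  if (entry -> qtid == qtid)        /* always false for &invalid_tse  */
    return TS_REVEAL_PTR(entry -> value);
  /* ... otherwise fall back to GC_slow_getspecific(). */
#endif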

GC_INNER int GC_key_create_inner(tsd ** key_ptr)
{
    int i;
    int ret;
    tsd * result;

    GC_ASSERT(I_HOLD_LOCK());
    /* A quick alignment check, since we need atomic stores */
    GC_ASSERT((word)(&invalid_tse.next) % sizeof(tse *) == 0);
    result = (tsd *)MALLOC_CLEAR(sizeof(tsd));
    if (NULL == result) return ENOMEM;
    ret = pthread_mutex_init(&result->lock, NULL);
    if (ret != 0) return ret;
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
      result -> cache[i] = (/* no const */ tse *)&invalid_tse;
    }
#   ifdef GC_ASSERTIONS
      for (i = 0; i < TS_HASH_SIZE; ++i) {
        GC_ASSERT(result -> hash[i].p == 0);
      }
#   endif
    *key_ptr = result;
    return 0;
}

GC_INNER int GC_setspecific(tsd * key, void * value)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    volatile tse * entry;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(self != INVALID_THREADID);
    GC_dont_gc++; /* disable GC while allocating the entry */
    entry = (volatile tse *)MALLOC_CLEAR(sizeof(tse));
    GC_dont_gc--; /* re-enable GC */
    if (0 == entry) return ENOMEM;

    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here.   */
    entry -> next = key->hash[hash_val].p;
    entry -> thread = self;
    entry -> value = TS_HIDE_VALUE(value);
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be    */
    /* atomic with respect to concurrent readers.                      */
    AO_store_release(&key->hash[hash_val].ao, (AO_t)entry);
    GC_dirty((/* no volatile */ void *)entry);
    GC_dirty(key->hash + hash_val);
    if (pthread_mutex_unlock(&key->lock) != 0)
      ABORT("pthread_mutex_unlock failed (setspecific)");
    return 0;
}
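
/* Illustrative usage only (not compiled): a hypothetical client       */
/* creates a key once, then registers a per-thread value while holding */
/* the allocation lock, as both functions above assume:                */
#if 0
  static tsd *example_key;      /* hypothetical key variable */

  /* Once, at initialization (holding the allocation lock): */
  if (GC_key_create_inner(&example_key) != 0)
    ABORT("tsd key creation failed");

  /* In each thread (again holding the allocation lock): */
  if (GC_setspecific(example_key, my_value) != 0) /* my_value: hypothetical */
    ABORT("GC_setspecific failed");
#endif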

/* Remove thread-specific data for a given thread.  This function is   */
/* called at fork from the child process for all threads except for    */
/* the surviving one.  GC_remove_specific() should be called on        */
/* thread exit.                                                        */
GC_INNER void GC_remove_specific_after_fork(tsd * key, pthread_t t)
{
    unsigned hash_val = HASH(t);
    tse *entry;
    tse *prev = NULL;

#   ifdef CAN_HANDLE_FORK
      /* Both GC_setspecific and GC_remove_specific should be called   */
      /* with the allocation lock held to ensure the consistency of    */
      /* the hash table in the forked child.                           */
      GC_ASSERT(I_HOLD_LOCK());
#   endif
    pthread_mutex_lock(&(key -> lock));
    entry = key->hash[hash_val].p;
    while (entry != NULL && !THREAD_EQUAL(entry->thread, t)) {
      prev = entry;
      entry = entry->next;
    }
    /* Invalidate the qtid field, since qtids may be reused, and a     */
    /* later cache lookup could otherwise find this entry.             */
    if (entry != NULL) {
      entry -> qtid = INVALID_QTID;
      if (NULL == prev) {
        key->hash[hash_val].p = entry->next;
        GC_dirty(key->hash + hash_val);
      } else {
        prev->next = entry->next;
        GC_dirty(prev);
      }
      /* Atomic!  Concurrent accesses still work.       */
      /* They must, since readers don't lock.           */
      /* We shouldn't need a volatile access here,      */
      /* since both this and the preceding write        */
      /* should become visible no later than            */
      /* the pthread_mutex_unlock() call.               */
    }
    /* If we wanted to deallocate the entry, we'd first have to clear   */
    /* any cache entries pointing to it.  That probably requires        */
    /* additional synchronization, since we can't prevent a concurrent  */
    /* cache lookup, which might still be examining deallocated memory. */
    /* This can only happen if the concurrent access is from another    */
    /* thread, and hence has missed the cache, but still...             */
#   ifdef LINT2
      GC_noop1((word)entry);
#   endif

    /* With GC, we're done, since the pointers from the cache will     */
    /* be overwritten; all local pointers to the entries will be       */
    /* dropped, and the entry will then be reclaimed.                  */
    if (pthread_mutex_unlock(&key->lock) != 0)
      ABORT("pthread_mutex_unlock failed (remove_specific after fork)");
}

/* Note that even the slow path doesn't lock.   */
GC_INNER void * GC_slow_getspecific(tsd * key, word qtid,
                                    tse * volatile * cache_ptr)
{
    pthread_t self = pthread_self();
    tse *entry = key->hash[HASH(self)].p;

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && !THREAD_EQUAL(entry->thread, self)) {
      entry = entry -> next;
    }
    if (entry == NULL) return NULL;
    /* Set the cache entry.     */
    entry -> qtid = (AO_t)qtid;
        /* It's safe to do this asynchronously.  Either value   */
        /* is safe, though it may produce spurious misses.      */
        /* We're replacing one qtid with another one for the    */
        /* same thread.                                         */
    *cache_ptr = entry;
        /* Again, this is safe since pointer assignments are    */
        /* presumed atomic, and either pointer is valid.        */
    return TS_REVEAL_PTR(entry -> value);
}
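
/* A rough sketch (not the actual header code) of the lock-free fast    */
/* path that pairs with GC_slow_getspecific(); the real inline version  */
/* lives in private/specific.h.  quick_thread_id() and CACHE_HASH() are */
/* assumed from that header.  On a cache hit the only synchronization   */
/* is a single load of the cache slot:                                  */
#if 0
  GC_INLINE void * GC_getspecific_sketch(tsd * key) /* hypothetical name */
  {
    word qtid = quick_thread_id();
    tse * volatile * cache_ptr = &(key -> cache[CACHE_HASH(qtid)]);
    tse * entry = *cache_ptr;       /* must be read exactly once */

    if (entry -> qtid == qtid)      /* hit; cannot be &invalid_tse */
      return TS_REVEAL_PTR(entry -> value);
    return GC_slow_getspecific(key, qtid, cache_ptr);
  }
#endif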

#ifdef GC_ASSERTIONS
  /* Check that all elements of the data structure associated with     */
  /* key are marked.                                                   */
  void GC_check_tsd_marks(tsd *key)
  {
    int i;
    tse *p;

    if (!GC_is_marked(GC_base(key))) {
      ABORT("Unmarked thread-specific-data table");
    }
    for (i = 0; i < TS_HASH_SIZE; ++i) {
      for (p = key->hash[i].p; p != 0; p = p -> next) {
        if (!GC_is_marked(GC_base(p))) {
          ABORT_ARG1("Unmarked thread-specific-data entry",
                     " at %p", (void *)p);
        }
      }
    }
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
      p = key -> cache[i];
      if (p != &invalid_tse && !GC_is_marked(GC_base(p))) {
        ABORT_ARG1("Unmarked cached thread-specific-data entry",
                   " at %p", (void *)p);
      }
    }
  }
#endif /* GC_ASSERTIONS */

#endif /* USE_CUSTOM_SPECIFIC */