/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdqueue.h"
#include "apr_atomic.h"

typedef struct recycled_pool {
    apr_pool_t *pool;
    struct recycled_pool *next;
} recycled_pool;

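/* Bookkeeping shared between the listener and the worker threads:
 * an atomically maintained count of idle workers, the condition
 * variable the listener blocks on when no worker is idle, and a
 * lock-free stack of pools recycled from completed connections.
 */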
struct fd_queue_info_t {
    apr_uint32_t idlers;
    apr_thread_mutex_t *idlers_mutex;
    apr_thread_cond_t *wait_for_idler;
    int terminated;
    int max_idlers;
    recycled_pool *recycled_pools;
};

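/* Pool cleanup for the queue_info structure: tear down the mutex and
 * condition variable and destroy any pools still sitting on the
 * recycled list.
 */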
static apr_status_t queue_info_cleanup(void *data_)
{
    fd_queue_info_t *qi = data_;
    apr_thread_cond_destroy(qi->wait_for_idler);
    apr_thread_mutex_destroy(qi->idlers_mutex);

    /* Clean up any pools in the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = qi->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((volatile void **)&(qi->recycled_pools),
                              first_pool->next,
                              first_pool) == first_pool) {
            apr_pool_destroy(first_pool->pool);
        }
    }

    return APR_SUCCESS;
}

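/* Allocate and initialize a queue_info structure from 'pool', and
 * register queue_info_cleanup() so it is torn down when that pool
 * is destroyed.
 */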
apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
                                  apr_pool_t *pool, int max_idlers)
{
    apr_status_t rv;
    fd_queue_info_t *qi;

    qi = apr_pcalloc(pool, sizeof(*qi));

    rv = apr_thread_mutex_create(&qi->idlers_mutex, APR_THREAD_MUTEX_DEFAULT,
                                 pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_thread_cond_create(&qi->wait_for_idler, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    qi->recycled_pools = NULL;
    qi->max_idlers = max_idlers;
    apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                              apr_pool_cleanup_null);

    *queue_info = qi;

    return APR_SUCCESS;
}

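/* Called by a worker thread when it finishes a connection: optionally
 * donates the connection's pool to the recycled list, bumps the idle
 * count, and wakes the listener if the count just became nonzero.
 *
 * A rough sketch of the worker-side call sequence (identifiers here
 * are illustrative, not names defined in this file):
 *
 *     ap_queue_info_set_idle(worker_queue_info, ptrans);
 *     rv = ap_queue_pop(worker_queue, &csd, &ptrans);
 *     // ... process the connection, then loop ...
 */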
apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    apr_uint32_t prev_idlers;

    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (pool_to_recycle) {
        struct recycled_pool *new_recycle;
        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
                                                         sizeof(*new_recycle));
        new_recycle->pool = pool_to_recycle;
        for (;;) {
            /* Save queue_info->recycled_pools in a local variable, next,
             * because new_recycle->next can be changed after the
             * apr_atomic_casptr call. For gory details see PR 44402.
             */
            struct recycled_pool *next = queue_info->recycled_pools;
            new_recycle->next = next;
            if (apr_atomic_casptr((volatile void **)&(queue_info->recycled_pools),
                                  new_recycle, next) == next) {
                break;
            }
        }
    }

    /* Atomically increment the count of idle workers */
    for (;;) {
        prev_idlers = queue_info->idlers;
        if (apr_atomic_cas32(&(queue_info->idlers), prev_idlers + 1,
                             prev_idlers) == prev_idlers) {
            break;
        }
    }

    /* If this thread just made the idle worker count nonzero,
     * wake up the listener. */
    if (prev_idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}

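/* Called by the listener thread before accepting a new connection:
 * blocks until at least one worker is idle, claims that worker by
 * decrementing the idle count, and hands back a recycled pool if one
 * is available (*recycled_pool is set to NULL otherwise).
 *
 * A rough sketch of the listener-side call sequence (identifiers here
 * are illustrative, not names defined in this file):
 *
 *     apr_pool_t *ptrans = NULL;
 *     rv = ap_queue_info_wait_for_idler(worker_queue_info, &ptrans);
 *     if (rv == APR_SUCCESS) {
 *         if (ptrans == NULL) {
 *             apr_pool_create(&ptrans, pconf);
 *         }
 *         // accept a connection into csd, then:
 *         ap_queue_push(worker_queue, csd, ptrans);
 *     }
 */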
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;

    /* Block if the count of idle workers is zero */
    if (queue_info->idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition. Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still zero, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is nonzero, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above. It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable. But if the idle worker count is
         *     now nonzero, it's safe for this function to
         *     return immediately.
         */
        if (queue_info->idlers == 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    /* Atomically decrement the idle worker count */
    apr_atomic_dec32(&(queue_info->idlers));

    /* Atomically pop a pool from the recycled list */

    /* This function is safe only as long as it is single-threaded because
     * it reaches into the list and accesses "next", which can change.
     * We are OK today because it is only called from the listener thread.
     * cas-based pushes do not have the same limitation - any number can
     * happen concurrently with a single cas-based pop.
     */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((volatile void **)&(queue_info->recycled_pools),
                              first_pool->next,
                              first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}

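/* Mark the queue_info as terminated and wake up any listener blocked
 * in ap_queue_info_wait_for_idler() so it can observe the flag.
 */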
apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
{
    apr_status_t rv;

    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    queue_info->terminated = 1;
    apr_thread_cond_broadcast(queue_info->wait_for_idler);
    return apr_thread_mutex_unlock(queue_info->idlers_mutex);
}

/**
 * Detects when the fd_queue_t is full. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_full(queue) ((queue)->nelts == (queue)->bounds)

/**
 * Detects when the fd_queue_t is empty. This utility function is expected
 * to be called from within critical sections, and is not threadsafe.
 */
#define ap_queue_empty(queue) ((queue)->nelts == 0)

/**
 * Callback routine that is called to destroy this
 * fd_queue_t when its pool is destroyed.
 */
static apr_status_t ap_queue_destroy(void *data)
{
    fd_queue_t *queue = data;

    /* Ignore errors here; we can't do anything about them anyway.
     * XXX: We should at least try to signal an error here, it is
     * indicative of a programmer error. -aaron */
    apr_thread_cond_destroy(queue->not_empty);
    apr_thread_mutex_destroy(queue->one_big_mutex);

    return APR_SUCCESS;
}

/**
 * Initialize the fd_queue_t.
 */
apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
{
    int i;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_create(&queue->one_big_mutex,
                                      APR_THREAD_MUTEX_DEFAULT, a)) != APR_SUCCESS) {
        return rv;
    }
    if ((rv = apr_thread_cond_create(&queue->not_empty, a)) != APR_SUCCESS) {
        return rv;
    }

    queue->data = apr_palloc(a, queue_capacity * sizeof(fd_queue_elem_t));
    queue->bounds = queue_capacity;
    queue->nelts = 0;

    /* Set all the sockets in the queue to NULL */
    for (i = 0; i < queue_capacity; ++i)
        queue->data[i].sd = NULL;

    apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);

    return APR_SUCCESS;
}

/**
 * Push a new socket onto the queue.
 *
 * precondition: ap_queue_info_wait_for_idler has already been called
 *               to reserve an idle worker thread
 */
apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);
    AP_DEBUG_ASSERT(!ap_queue_full(queue));

    elem = &queue->data[queue->nelts];
    elem->sd = sd;
    elem->p = p;
    queue->nelts++;

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    return APR_SUCCESS;
}

/**
 * Retrieves the next available socket from the queue. If there are no
 * sockets available, it will block until one becomes available.
 * Once retrieved, the socket is placed into the address specified by
 * 'sd'.
 */
apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    /* Keep waiting until we wake up and find that the queue is not empty. */
    if (ap_queue_empty(queue)) {
        if (!queue->terminated) {
            apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
        }
        /* If we wake up and it's still empty, then we were interrupted */
        if (ap_queue_empty(queue)) {
            rv = apr_thread_mutex_unlock(queue->one_big_mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            if (queue->terminated) {
                return APR_EOF; /* no more elements ever again */
            }
            else {
                return APR_EINTR;
            }
        }
    }

    elem = &queue->data[--queue->nelts];
    *sd = elem->sd;
    *p = elem->p;
#ifdef AP_DEBUG
    elem->sd = NULL;
    elem->p = NULL;
#endif /* AP_DEBUG */

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}

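/* Wake up all threads blocked in ap_queue_pop(). Used on its own to
 * interrupt waiting workers, and by ap_queue_term() after setting the
 * terminated flag.
 */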
apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    apr_thread_cond_broadcast(queue->not_empty);
    return apr_thread_mutex_unlock(queue->one_big_mutex);
}

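/* Mark the queue as terminated and wake all waiting threads; once the
 * flag is set, blocked callers of ap_queue_pop() return APR_EOF.
 */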
apr_status_t ap_queue_term(fd_queue_t *queue)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    /* we must hold one_big_mutex when setting this... otherwise,
     * we could end up setting it and waking everybody up just after a
     * would-be popper checks it but right before they block
     */
    queue->terminated = 1;
    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }
    return ap_queue_interrupt_all(queue);
}