apr_thread_mutex_t *idlers_mutex;
apr_thread_cond_t *wait_for_idler;
int terminated;
+ int max_idlers;
+ apr_pool_t **recycled_pools;
+ int num_recycled;
};
static apr_status_t queue_info_cleanup(void *data_)
{
fd_queue_info_t *qi = data_;
+ int i;
apr_thread_cond_destroy(qi->wait_for_idler);
apr_thread_mutex_destroy(qi->idlers_mutex);
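+ /* Free any transaction pools that were parked for reuse but never reclaimed. */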
+ for (i = 0; i < qi->num_recycled; i++) {
+ apr_pool_destroy(qi->recycled_pools[i]);
+ }
return APR_SUCCESS;
}
apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
- apr_pool_t *pool)
+ apr_pool_t *pool, int max_idlers)
{
apr_status_t rv;
fd_queue_info_t *qi;
if (rv != APR_SUCCESS) {
return rv;
}
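+ /* Each idle worker can park at most one pool, so max_idlers bounds the recycled-pool stack. */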
+ qi->recycled_pools = (apr_pool_t **)apr_palloc(pool, max_idlers *
+ sizeof(apr_pool_t *));
+ qi->num_recycled = 0;
+ qi->max_idlers = max_idlers;
apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
apr_pool_cleanup_null);
return APR_SUCCESS;
}
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info)
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+ apr_pool_t *pool_to_recycle)
{
apr_status_t rv;
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
return rv;
}
AP_DEBUG_ASSERT(queue_info->idlers >= 0);
+ AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
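+ /* pool_to_recycle may be NULL, e.g. on a worker's first pass before any transaction has completed. */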
+ if (pool_to_recycle) {
+ queue_info->recycled_pools[queue_info->num_recycled++] =
+ pool_to_recycle;
+ }
if (queue_info->idlers++ == 0) {
/* Only signal if we had no idlers before. */
apr_thread_cond_signal(queue_info->wait_for_idler);
}
rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
return rv;
}
return APR_SUCCESS;
}
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info)
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
+ apr_pool_t **recycled_pool)
{
apr_status_t rv;
+ *recycled_pool = NULL;
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
return rv;
}
}
queue_info->idlers--; /* Oh, an idler? Let's take 'em! */
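+ /* Hand the caller a parked pool if one is available; otherwise the caller is expected to create a fresh transaction pool. */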
+ if (queue_info->num_recycled) {
+ *recycled_pool =
+ queue_info->recycled_pools[--queue_info->num_recycled];
+ }
rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
if (rv != APR_SUCCESS) {
return rv;
for (i = 0; i < queue_capacity; ++i)
queue->data[i].sd = NULL;
- queue->recycled_pools = apr_palloc(a,
- queue_capacity * sizeof(apr_pool_t *));
- queue->num_recycled = 0;
-
apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);
return APR_SUCCESS;
* the push operation has completed, it signals other threads waiting
* in ap_queue_pop() that they may continue consuming sockets.
*/
-apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
- apr_pool_t **recycled_pool)
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
{
fd_queue_elem_t *elem;
apr_status_t rv;
- *recycled_pool = NULL;
if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
return rv;
}
elem->p = p;
queue->nelts++;
- if (queue->num_recycled != 0) {
- *recycled_pool = queue->recycled_pools[--queue->num_recycled];
- }
-
apr_thread_cond_signal(queue->not_empty);
if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
* Once retrieved, the socket is placed into the address specified by
* 'sd'.
*/
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
- apr_pool_t *recycled_pool)
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
{
fd_queue_elem_t *elem;
apr_status_t rv;
- int delete_pool = 0;
if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
- if (recycled_pool) {
- apr_pool_destroy(recycled_pool);
- }
return rv;
}
- if (recycled_pool) {
- if (queue->num_recycled < queue->bounds) {
- queue->recycled_pools[queue->num_recycled++] = recycled_pool;
- }
- else {
- delete_pool = 1;
- }
- }
-
/* Keep waiting until we wake up and find that the queue is not empty. */
if (ap_queue_empty(queue)) {
if (!queue->terminated) {
/* If we wake up and it's still empty, then we were interrupted */
if (ap_queue_empty(queue)) {
rv = apr_thread_mutex_unlock(queue->one_big_mutex);
- if (delete_pool) {
- apr_pool_destroy(recycled_pool);
- }
if (rv != APR_SUCCESS) {
return rv;
}
}
rv = apr_thread_mutex_unlock(queue->one_big_mutex);
- if (delete_pool) {
- apr_pool_destroy(recycled_pool);
- }
return rv;
}
typedef struct fd_queue_info_t fd_queue_info_t;
apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
- apr_pool_t *pool);
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info);
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info);
+ apr_pool_t *pool, int max_idlers);
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+ apr_pool_t *pool_to_recycle);
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
+ apr_pool_t **recycled_pool);
apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info);
struct fd_queue_t {
apr_thread_mutex_t *one_big_mutex;
apr_thread_cond_t *not_empty;
apr_thread_cond_t *not_full;
- apr_pool_t **recycled_pools;
- int num_recycled;
int terminated;
};
typedef struct fd_queue_t fd_queue_t;
apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a);
-apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
- apr_pool_t **recycled_pool);
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
- apr_pool_t *recycled_pool);
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p);
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p);
apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
apr_status_t ap_queue_term(fd_queue_t *queue);
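/*
 * Illustrative sketch (not part of the patch): a condensed view of how the
 * listener and worker loops are expected to use this API after the change.
 * The function names worker_loop_sketch/listener_loop_sketch and all local
 * variables are hypothetical; error handling and the accept/process steps
 * are elided. Assumes apr_pools.h plus the declarations above.
 */
static void worker_loop_sketch(fd_queue_info_t *qi, fd_queue_t *q)
{
    apr_socket_t *csd;
    apr_pool_t *ptrans;
    apr_pool_t *last_ptrans = NULL; /* nothing to donate on the first pass */
    for (;;) {
        /* Mark this thread idle, donating the previous transaction pool. */
        if (ap_queue_info_set_idle(qi, last_ptrans) != APR_SUCCESS)
            break;
        last_ptrans = NULL;
        /* Block until the listener queues a connection. */
        if (ap_queue_pop(q, &csd, &ptrans) != APR_SUCCESS)
            break; /* e.g. APR_EOF on graceful shutdown */
        /* ... process the connection on csd ... */
        apr_pool_clear(ptrans);
        last_ptrans = ptrans; /* park the pool for recycling */
    }
}

static void listener_loop_sketch(fd_queue_info_t *qi, fd_queue_t *q,
                                 apr_pool_t *pconf)
{
    for (;;) {
        apr_socket_t *csd = NULL;
        apr_pool_t *ptrans = NULL;
        /* Block until a worker is idle; may hand back a donated pool. */
        if (ap_queue_info_wait_for_idler(qi, &ptrans) != APR_SUCCESS)
            break;
        if (ptrans == NULL) {
            apr_pool_create(&ptrans, pconf); /* no parked pool to reuse */
        }
        /* ... accept a connection into csd, allocating from ptrans ... */
        ap_queue_push(q, csd, ptrans);
    }
}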
}
if (listener_may_exit) break;
- rv = ap_queue_info_wait_for_idler(worker_queue_info);
+ rv = ap_queue_info_wait_for_idler(worker_queue_info,
+ &recycled_pool);
if (APR_STATUS_IS_EOF(rv)) {
break; /* we've been signaled to die now */
}
signal_threads(ST_GRACEFUL);
}
if (csd != NULL) {
- rv = ap_queue_push(worker_queue, csd, ptrans,
- &recycled_pool);
+ rv = ap_queue_push(worker_queue, csd, ptrans);
if (rv) {
/* trash the connection; we couldn't queue the connected
* socket to a worker
bucket_alloc = apr_bucket_alloc_create(apr_thread_pool_get(thd));
while (!workers_may_exit) {
- rv = ap_queue_info_set_idle(worker_queue_info);
+ rv = ap_queue_info_set_idle(worker_queue_info, last_ptrans);
+ last_ptrans = NULL;
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
"ap_queue_info_set_idle failed. Attempting to "
}
ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_READY, NULL);
- rv = ap_queue_pop(worker_queue, &csd, &ptrans, last_ptrans);
- last_ptrans = NULL;
+ rv = ap_queue_pop(worker_queue, &csd, &ptrans);
if (rv != APR_SUCCESS) {
/* We get APR_EOF during a graceful shutdown once all the connections
clean_child_exit(APEXIT_CHILDFATAL);
}
- rv = ap_queue_info_create(&worker_queue_info, pchild);
+ rv = ap_queue_info_create(&worker_queue_info, pchild,
+ ap_threads_per_child);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
"ap_queue_info_create() failed");