Changes with Apache 2.0.32-dev
+ *) Performance: Reuse per-connection transaction pools in the
+ worker MPM, rather than destroying and recreating them. [Brian Pane]
+
*) mod_negotiation: ForceLanguagePriority now uses 'Prefer' as the
default if the directive is not specified. This mirrors older
behavior without changes to the httpd.conf. [William Rowe]
APACHE 2.0 STATUS: -*-text-*-
-Last modified at [$Date: 2002/02/10 21:16:25 $]
+Last modified at [$Date: 2002/02/13 04:49:55 $]
Release:
when things calm down a little. It looks OK when
there are complete lines and no mime continuations.
- * Modify the worker MPM so that it doesn't need to create and
- destroy a pool for each request--possibly by adopting a
- leader/follower model in which each worker owns a persistent
- ptrans pool (like the prefork MPM) and the workers take
- turns acting as listeners...this approach might also help
- reduce context-switching
-
* CGI single-byte reads
BrianP suggests that this is caused by the ap_scan_script_header_err()
routine, which will do single-byte reads until it finds the end
for (i = 0; i < queue_capacity; ++i)
queue->data[i].sd = NULL;
+ queue->recycled_pools = apr_palloc(a,
+ queue_capacity * sizeof(apr_pool_t *));
+ queue->num_recycled = 0;
+
apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);
return FD_QUEUE_SUCCESS;
* the push operation has completed, it signals other threads waiting
* in apr_queue_pop() that they may continue consuming sockets.
*/
-int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
+int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
+ apr_pool_t **recycled_pool)
{
fd_queue_elem_t *elem;
+ *recycled_pool = NULL;
if (apr_thread_mutex_lock(queue->one_big_mutex) != APR_SUCCESS) {
return FD_QUEUE_FAILURE;
}
elem->sd = sd;
elem->p = p;
+ if (queue->num_recycled != 0) {
+ *recycled_pool = queue->recycled_pools[--queue->num_recycled];
+ }
+
apr_thread_cond_signal(queue->not_empty);
if (apr_thread_mutex_unlock(queue->one_big_mutex) != APR_SUCCESS) {
* Once retrieved, the socket is placed into the address specified by
* 'sd'.
*/
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
+ apr_pool_t *recycled_pool)
{
fd_queue_elem_t *elem;
if (apr_thread_mutex_lock(queue->one_big_mutex) != APR_SUCCESS) {
+ if (recycled_pool) {
+ apr_pool_destroy(recycled_pool);
+ }
return FD_QUEUE_FAILURE;
}
+ if (recycled_pool) {
+ if (queue->num_recycled < queue->bounds) {
+ queue->recycled_pools[queue->num_recycled++] = recycled_pool;
+ }
+ else {
+ apr_pool_destroy(recycled_pool);
+ }
+ }
+
/* Keep waiting until we wake up and find that the queue is not empty. */
if (ap_queue_empty(queue)) {
apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
apr_thread_cond_t *not_empty;
apr_thread_cond_t *not_full;
int cancel_state;
+ apr_pool_t **recycled_pools;
+ int num_recycled;
};
typedef struct fd_queue_t fd_queue_t;
/* FIXME: APRize these -- return values should be apr_status_t */
int ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a);
-int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p);
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p);
+int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
+ apr_pool_t **recycled_pool);
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
+ apr_pool_t *recycled_pool);
apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
#endif /* FDQUEUE_H */
apr_pool_t *tpool = apr_thread_pool_get(thd);
void *csd = NULL;
apr_pool_t *ptrans; /* Pool for per-transaction stuff */
+ apr_pool_t *recycled_pool = NULL;
int n;
apr_pollfd_t *pollset;
apr_status_t rv;
got_fd:
if (!workers_may_exit) {
- /* create a new transaction pool for each accepted socket */
+ /* reuse a recycled transaction pool if one is available;
+ * otherwise create a new one for this accepted socket */
- apr_pool_create_ex(&ptrans, NULL, NULL, APR_POOL_FNEW_ALLOCATOR);
+ if (recycled_pool == NULL) {
+ apr_pool_create_ex(&ptrans, NULL, NULL, APR_POOL_FNEW_ALLOCATOR);
+ }
+ else {
+ ptrans = recycled_pool;
+ }
apr_pool_tag(ptrans, "transaction");
-
rv = lr->accept_func(&csd, lr, ptrans);
if (rv == APR_EGENERAL) {
signal_workers();
}
if (csd != NULL) {
- rv = ap_queue_push(worker_queue, csd, ptrans);
+ rv = ap_queue_push(worker_queue, csd, ptrans,
+ &recycled_pool);
if (rv) {
/* trash the connection; we couldn't queue the connected
* socket to a worker
int process_slot = ti->pid;
int thread_slot = ti->tid;
apr_socket_t *csd = NULL;
+ apr_pool_t *last_ptrans = NULL;
apr_pool_t *ptrans; /* Pool for per-transaction stuff */
apr_status_t rv;
ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_STARTING, NULL);
while (!workers_may_exit) {
ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_READY, NULL);
- rv = ap_queue_pop(worker_queue, &csd, &ptrans);
+ rv = ap_queue_pop(worker_queue, &csd, &ptrans, last_ptrans);
+ last_ptrans = NULL;
+
/* We get FD_QUEUE_EINTR whenever ap_queue_pop() has been interrupted
* from an explicit call to ap_queue_interrupt_all(). This allows
* us to unblock threads stuck in ap_queue_pop() when a shutdown
}
process_socket(ptrans, csd, process_slot, thread_slot);
requests_this_child--; /* FIXME: should be synchronized - aaron */
- apr_pool_destroy(ptrans);
+ apr_pool_clear(ptrans);
+ last_ptrans = ptrans;
}
ap_update_child_status_from_indexes(process_slot, thread_slot,