granicus.if.org Git - apache/commitdiff
author    Brian Pane <brianp@apache.org>
          Wed, 13 Feb 2002 04:49:55 +0000 (04:49 +0000)
committer Brian Pane <brianp@apache.org>
          Wed, 13 Feb 2002 04:49:55 +0000 (04:49 +0000)

Performance optimization: updated the worker MPM to recycle
per-transaction pools instead of destroying them.  Based on
Ian's benchmark testing, this reduces CPU utilization by
about 1% on Solaris.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@93386 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
STATUS
server/mpm/worker/fdqueue.c
server/mpm/worker/fdqueue.h
server/mpm/worker/worker.c

diff --git a/CHANGES b/CHANGES
index 47dcf80a2d7c03516d8be197c5aebcf35fb835e1..ea955c235410ac0e6de1141514ee6eadff07ddc0 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,8 @@
 Changes with Apache 2.0.32-dev
 
+  *) Performance: Reuse per-connection transaction pools in the
+     worker MPM, rather than destroying and recreating them.  [Brian Pane]
+
   *) mod_negotiation: ForceLanguagePriority now uses 'Prefer' as the
      default if the directive is not specified.  This mirrors older
      behavior without changes to the httpd.conf.  [William Rowe]
diff --git a/STATUS b/STATUS
index 06c7ff07fcccfdbb2c63b3414d8021c699ae778a..cfa8f9f97ea81df3707c76a79d0f1d1a67cc1dfa 100644 (file)
--- a/STATUS
+++ b/STATUS
@@ -1,5 +1,5 @@
 APACHE 2.0 STATUS:                                              -*-text-*-
-Last modified at [$Date: 2002/02/10 21:16:25 $]
+Last modified at [$Date: 2002/02/13 04:49:55 $]
 
 Release:
 
@@ -186,13 +186,6 @@ RELEASE NON-SHOWSTOPPERS BUT WOULD BE REAL NICE TO WRAP THESE UP:
                        when things calm down a little.  It looks OK when 
                        there are complete lines and no mime continuations.
 
-    * Modify the worker MPM so that it doesn't need to create and
-      destroy a pool for each request--possibly by adopting a
-      leader/follower model in which each worker owns a persistent
-      ptrans pool (like the prefork MPM) and the workers take
-      turns acting as listeners...this approach might also help
-      reduce context-switching
-
     * CGI single-byte reads
       BrianP suggests that this is caused by the ap_scan_script_header_err()
       routine, which will do single-byte reads until it finds the end
diff --git a/server/mpm/worker/fdqueue.c b/server/mpm/worker/fdqueue.c
index be4b19311e58932f0ca47e399c7d3bfad7e8afcc..338588e0492c785f6adbc454a24f0236b2605172 100644 (file)
--- a/server/mpm/worker/fdqueue.c
+++ b/server/mpm/worker/fdqueue.c
@@ -112,6 +112,10 @@ int ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
     for (i = 0; i < queue_capacity; ++i)
         queue->data[i].sd = NULL;
 
+    queue->recycled_pools = apr_palloc(a,
+                                       queue_capacity * sizeof(apr_pool_t *));
+    queue->num_recycled = 0;
+
     apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);
 
     return FD_QUEUE_SUCCESS;
@@ -122,10 +126,12 @@ int ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
  * the push operation has completed, it signals other threads waiting
  * in apr_queue_pop() that they may continue consuming sockets.
  */
-int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p) 
+int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
+                  apr_pool_t **recycled_pool)
 {
     fd_queue_elem_t *elem;
 
+    *recycled_pool = NULL;
     if (apr_thread_mutex_lock(queue->one_big_mutex) != APR_SUCCESS) {
         return FD_QUEUE_FAILURE;
     }
@@ -138,6 +144,10 @@ int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
     elem->sd = sd;
     elem->p = p;
 
+    if (queue->num_recycled != 0) {
+        *recycled_pool = queue->recycled_pools[--queue->num_recycled];
+    }
+
     apr_thread_cond_signal(queue->not_empty);
 
     if (apr_thread_mutex_unlock(queue->one_big_mutex) != APR_SUCCESS) {
@@ -153,14 +163,27 @@ int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
  * Once retrieved, the socket is placed into the address specified by
  * 'sd'.
  */
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p) 
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
+                          apr_pool_t *recycled_pool) 
 {
     fd_queue_elem_t *elem;
 
     if (apr_thread_mutex_lock(queue->one_big_mutex) != APR_SUCCESS) {
+        if (recycled_pool) {
+            apr_pool_destroy(recycled_pool);
+        }
         return FD_QUEUE_FAILURE;
     }
 
+    if (recycled_pool) {
+        if (queue->num_recycled < queue->bounds) {
+            queue->recycled_pools[queue->num_recycled++] = recycled_pool;
+        }
+        else {
+            apr_pool_destroy(recycled_pool);
+        }
+    }
+
     /* Keep waiting until we wake up and find that the queue is not empty. */
     if (ap_queue_empty(queue)) {
         apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
diff --git a/server/mpm/worker/fdqueue.h b/server/mpm/worker/fdqueue.h
index 627b09e9a4d3a8d38683656526ae130631f321ca..ad0064a741dac3f4ad7fc854957802b655db3ff6 100644 (file)
--- a/server/mpm/worker/fdqueue.h
+++ b/server/mpm/worker/fdqueue.h
@@ -91,13 +91,17 @@ struct fd_queue_t {
     apr_thread_cond_t  *not_empty;
     apr_thread_cond_t  *not_full;
     int                 cancel_state;
+    apr_pool_t        **recycled_pools;
+    int                 num_recycled;
 };
 typedef struct fd_queue_t fd_queue_t;
 
 /* FIXME: APRize these -- return values should be apr_status_t */
 int ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a);
-int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p);
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p);
+int ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
+                  apr_pool_t **recycled_pool);
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
+                          apr_pool_t *recycled_pool);
 apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
 
 #endif /* FDQUEUE_H */
diff --git a/server/mpm/worker/worker.c b/server/mpm/worker/worker.c
index 01607843ff8923c8beab4afd0143ad5314a7cd42..5cd3bfabf6c442d4ab51db840b739ef1da086800 100644 (file)
--- a/server/mpm/worker/worker.c
+++ b/server/mpm/worker/worker.c
@@ -595,6 +595,7 @@ static void *listener_thread(apr_thread_t *thd, void * dummy)
     apr_pool_t *tpool = apr_thread_pool_get(thd);
     void *csd = NULL;
     apr_pool_t *ptrans;                /* Pool for per-transaction stuff */
+    apr_pool_t *recycled_pool = NULL;
     int n;
     apr_pollfd_t *pollset;
     apr_status_t rv;
@@ -670,9 +671,13 @@ static void *listener_thread(apr_thread_t *thd, void * dummy)
     got_fd:
         if (!workers_may_exit) {
             /* create a new transaction pool for each accepted socket */
-            apr_pool_create_ex(&ptrans, NULL, NULL, APR_POOL_FNEW_ALLOCATOR);
+            if (recycled_pool == NULL) {
+                apr_pool_create_ex(&ptrans, NULL, NULL, APR_POOL_FNEW_ALLOCATOR);
+            }
+            else {
+                ptrans = recycled_pool;
+            }
             apr_pool_tag(ptrans, "transaction");
-
             rv = lr->accept_func(&csd, lr, ptrans);
 
             if (rv == APR_EGENERAL) {
@@ -688,7 +693,8 @@ static void *listener_thread(apr_thread_t *thd, void * dummy)
                 signal_workers();
             }
             if (csd != NULL) {
-                rv = ap_queue_push(worker_queue, csd, ptrans);
+                rv = ap_queue_push(worker_queue, csd, ptrans,
+                                   &recycled_pool);
                 if (rv) {
                     /* trash the connection; we couldn't queue the connected
                      * socket to a worker 
@@ -729,6 +735,7 @@ static void * APR_THREAD_FUNC worker_thread(apr_thread_t *thd, void * dummy)
     int process_slot = ti->pid;
     int thread_slot = ti->tid;
     apr_socket_t *csd = NULL;
+    apr_pool_t *last_ptrans = NULL;
     apr_pool_t *ptrans;                /* Pool for per-transaction stuff */
     apr_status_t rv;
 
@@ -737,7 +744,9 @@ static void * APR_THREAD_FUNC worker_thread(apr_thread_t *thd, void * dummy)
     ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_STARTING, NULL);
     while (!workers_may_exit) {
         ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_READY, NULL);
-        rv = ap_queue_pop(worker_queue, &csd, &ptrans);
+        rv = ap_queue_pop(worker_queue, &csd, &ptrans, last_ptrans);
+        last_ptrans = NULL;
+
         /* We get FD_QUEUE_EINTR whenever ap_queue_pop() has been interrupted
          * from an explicit call to ap_queue_interrupt_all(). This allows
          * us to unblock threads stuck in ap_queue_pop() when a shutdown
@@ -747,7 +756,8 @@ static void * APR_THREAD_FUNC worker_thread(apr_thread_t *thd, void * dummy)
         }
         process_socket(ptrans, csd, process_slot, thread_slot);
         requests_this_child--; /* FIXME: should be synchronized - aaron */
-        apr_pool_destroy(ptrans);
+        apr_pool_clear(ptrans);
+        last_ptrans = ptrans;
     }
 
     ap_update_child_status_from_indexes(process_slot, thread_slot,