granicus.if.org Git - apache/commitdiff
Moved the recycled pool list from the queue to the queue_info structure.
author: Brian Pane <brianp@apache.org>
Sun, 28 Apr 2002 05:28:18 +0000 (05:28 +0000)
committer: Brian Pane <brianp@apache.org>
Sun, 28 Apr 2002 05:28:18 +0000 (05:28 +0000)
The advantage of doing this is that it enables us to guarantee that the
number of ptrans pools in existence at once is no greater than the
number of worker threads, and that we'll never have to delete ptrans
pools.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@94830 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
server/mpm/worker/fdqueue.c
server/mpm/worker/fdqueue.h
server/mpm/worker/worker.c

diff --git a/CHANGES b/CHANGES
index 0d5c1bee30f1297249afa9f4bcc0e5da53ea4e00..159ba3b0a6baada13f9d7380717c29fc104be05b 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,5 +1,7 @@
 Changes with Apache 2.0.37
 
+  *) More efficient pool recycling logic for the worker MPM [Brian Pane]
+
   *) Modify the worker MPM to not accept() new connections until
      there is an available worker thread. This prevents queued
      connections from starving for processing time while long-running
index a7f9bc285718c3c38a8bc03408b08bc49453d46d..362583986e136cdd4aa5d83d37c0bd5fbe7c2835 100644 (file)
@@ -63,18 +63,25 @@ struct fd_queue_info_t {
     apr_thread_mutex_t *idlers_mutex;
     apr_thread_cond_t *wait_for_idler;
     int terminated;
+    int max_idlers;
+    apr_pool_t        **recycled_pools;
+    int num_recycled;
 };
 
 static apr_status_t queue_info_cleanup(void *data_)
 {
     fd_queue_info_t *qi = data_;
+    int i;
     apr_thread_cond_destroy(qi->wait_for_idler);
     apr_thread_mutex_destroy(qi->idlers_mutex);
+    for (i = 0; i < qi->num_recycled; i++) {
+        apr_pool_destroy(qi->recycled_pools[i]);
+    }
     return APR_SUCCESS;
 }
 
 apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
-                                  apr_pool_t *pool)
+                                  apr_pool_t *pool, int max_idlers)
 {
     apr_status_t rv;
     fd_queue_info_t *qi;
@@ -91,6 +98,10 @@ apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
     if (rv != APR_SUCCESS) {
         return rv;
     }
+    qi->recycled_pools = (apr_pool_t **)apr_palloc(pool, max_idlers *
+                                                   sizeof(apr_pool_t *));
+    qi->num_recycled = 0;
+    qi->max_idlers = max_idlers;
     apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                               apr_pool_cleanup_null);
 
@@ -99,7 +110,8 @@ apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
     return APR_SUCCESS;
 }
 
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info)
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+                                    apr_pool_t *pool_to_recycle)
 {
     apr_status_t rv;
     rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
@@ -107,6 +119,11 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info)
         return rv;
     }
     AP_DEBUG_ASSERT(queue_info->idlers >= 0);
+    AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
+    if (pool_to_recycle) {
+        queue_info->recycled_pools[queue_info->num_recycled++] =
+            pool_to_recycle;
+    }
     if (queue_info->idlers++ == 0) {
         /* Only signal if we had no idlers before. */
         apr_thread_cond_signal(queue_info->wait_for_idler);
@@ -118,9 +135,11 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info)
     return APR_SUCCESS;
 }
 
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info)
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
+                                          apr_pool_t **recycled_pool)
 {
     apr_status_t rv;
+    *recycled_pool = NULL;
     rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
     if (rv != APR_SUCCESS) {
         return rv;
@@ -139,6 +158,10 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info)
         }
     }
     queue_info->idlers--; /* Oh, and idler? Let's take 'em! */
+    if (queue_info->num_recycled) {
+        *recycled_pool =
+            queue_info->recycled_pools[--queue_info->num_recycled];
+    }
     rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
     if (rv != APR_SUCCESS) {
         return rv;
@@ -225,10 +248,6 @@ apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
     for (i = 0; i < queue_capacity; ++i)
         queue->data[i].sd = NULL;
 
-    queue->recycled_pools = apr_palloc(a,
-                                       queue_capacity * sizeof(apr_pool_t *));
-    queue->num_recycled = 0;
-
     apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);
 
     return APR_SUCCESS;
@@ -239,13 +258,11 @@ apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
  * the push operation has completed, it signals other threads waiting
  * in apr_queue_pop() that they may continue consuming sockets.
  */
-apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
-                           apr_pool_t **recycled_pool)
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
 {
     fd_queue_elem_t *elem;
     apr_status_t rv;
 
-    *recycled_pool = NULL;
     if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
         return rv;
     }
@@ -262,10 +279,6 @@ apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
     elem->p = p;
     queue->nelts++;
 
-    if (queue->num_recycled != 0) {
-        *recycled_pool = queue->recycled_pools[--queue->num_recycled];
-    }
-
     apr_thread_cond_signal(queue->not_empty);
 
     if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
@@ -281,29 +294,15 @@ apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
  * Once retrieved, the socket is placed into the address specified by
  * 'sd'.
  */
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
-                          apr_pool_t *recycled_pool) 
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
 {
     fd_queue_elem_t *elem;
     apr_status_t rv;
-    int delete_pool = 0;
 
     if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
-        if (recycled_pool) {
-            apr_pool_destroy(recycled_pool);
-        }
         return rv;
     }
 
-    if (recycled_pool) {
-        if (queue->num_recycled < queue->bounds) {
-            queue->recycled_pools[queue->num_recycled++] = recycled_pool;
-        }
-        else {
-            delete_pool = 1;
-        }
-    }
-
     /* Keep waiting until we wake up and find that the queue is not empty. */
     if (ap_queue_empty(queue)) {
         if (!queue->terminated) {
@@ -312,9 +311,6 @@ apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
         /* If we wake up and it's still empty, then we were interrupted */
         if (ap_queue_empty(queue)) {
             rv = apr_thread_mutex_unlock(queue->one_big_mutex);
-            if (delete_pool) {
-                apr_pool_destroy(recycled_pool);
-            }
             if (rv != APR_SUCCESS) {
                 return rv;
             }
@@ -341,9 +337,6 @@ apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
     }
 
     rv = apr_thread_mutex_unlock(queue->one_big_mutex);
-    if (delete_pool) {
-        apr_pool_destroy(recycled_pool);
-    }
     return rv;
 }
 
index c7d5cab1c741a528bb12b4496befee17110f95f9..4e72358188fd494a751f4ddad2975638ca4a4131 100644 (file)
 typedef struct fd_queue_info_t fd_queue_info_t;
 
 apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
-                                  apr_pool_t *pool);
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info);
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info);
+                                  apr_pool_t *pool, int max_idlers);
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+                                    apr_pool_t *pool_to_recycle);
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
+                                          apr_pool_t **recycled_pool);
 apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info);
 
 struct fd_queue_elem_t {
@@ -94,17 +96,13 @@ struct fd_queue_t {
     apr_thread_mutex_t *one_big_mutex;
     apr_thread_cond_t  *not_empty;
     apr_thread_cond_t  *not_full;
-    apr_pool_t        **recycled_pools;
-    int                 num_recycled;
     int                 terminated;
 };
 typedef struct fd_queue_t fd_queue_t;
 
 apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a);
-apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
-                           apr_pool_t **recycled_pool);
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
-                          apr_pool_t *recycled_pool);
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p);
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p);
 apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
 apr_status_t ap_queue_term(fd_queue_t *queue);
 
index 378d8a56da78039f6a30d7b682f871f55f8950d7..c6111616db46592e44011c72284848e3ce98e37e 100644 (file)
@@ -695,7 +695,8 @@ static void *listener_thread(apr_thread_t *thd, void * dummy)
         }
         if (listener_may_exit) break;
 
-        rv = ap_queue_info_wait_for_idler(worker_queue_info);
+        rv = ap_queue_info_wait_for_idler(worker_queue_info,
+                                          &recycled_pool);
         if (APR_STATUS_IS_EOF(rv)) {
             break; /* we've been signaled to die now */
         }
@@ -807,8 +808,7 @@ static void *listener_thread(apr_thread_t *thd, void * dummy)
                 signal_threads(ST_GRACEFUL);
             }
             if (csd != NULL) {
-                rv = ap_queue_push(worker_queue, csd, ptrans,
-                                   &recycled_pool);
+                rv = ap_queue_push(worker_queue, csd, ptrans);
                 if (rv) {
                     /* trash the connection; we couldn't queue the connected
                      * socket to a worker 
@@ -867,7 +867,8 @@ static void * APR_THREAD_FUNC worker_thread(apr_thread_t *thd, void * dummy)
     bucket_alloc = apr_bucket_alloc_create(apr_thread_pool_get(thd));
 
     while (!workers_may_exit) {
-        rv = ap_queue_info_set_idle(worker_queue_info);
+        rv = ap_queue_info_set_idle(worker_queue_info, last_ptrans);
+        last_ptrans = NULL;
         if (rv != APR_SUCCESS) {
             ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                          "ap_queue_info_set_idle failed. Attempting to "
@@ -877,8 +878,7 @@ static void * APR_THREAD_FUNC worker_thread(apr_thread_t *thd, void * dummy)
         }
 
         ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_READY, NULL);
-        rv = ap_queue_pop(worker_queue, &csd, &ptrans, last_ptrans);
-        last_ptrans = NULL;
+        rv = ap_queue_pop(worker_queue, &csd, &ptrans);
 
         if (rv != APR_SUCCESS) {
             /* We get APR_EOF during a graceful shutdown once all the connections
@@ -986,7 +986,8 @@ static void * APR_THREAD_FUNC start_threads(apr_thread_t *thd, void *dummy)
         clean_child_exit(APEXIT_CHILDFATAL);
     }
 
-    rv = ap_queue_info_create(&worker_queue_info, pchild);
+    rv = ap_queue_info_create(&worker_queue_info, pchild,
+                              ap_threads_per_child);
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                      "ap_queue_info_create() failed");