Allow the load balancing method to "create" workers.
author    Jean-Frederic Clere <jfclere@apache.org>
          Wed, 13 May 2009 15:27:05 +0000 (15:27 +0000)
committer Jean-Frederic Clere <jfclere@apache.org>
          Wed, 13 May 2009 15:27:05 +0000 (15:27 +0000)
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@774400 13f79535-47bb-0310-9956-ffa450edef68

modules/proxy/mod_proxy.c
modules/proxy/mod_proxy.h
modules/proxy/mod_proxy_balancer.c
modules/proxy/proxy_util.c
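
The net effect is that a load balancing method can now add balancer members at
run time: ap_proxy_add_worker()/ap_proxy_create_worker() no longer allocate the
connection pool and thread mutex up front, and ap_proxy_initialize_worker()
creates them lazily from a caller-supplied pool. A minimal sketch of what such
a method could do follows; the helper name grow_balancer(), the backend URL and
the use of conf->pool are illustrative assumptions, not part of this commit.

    #include "mod_proxy.h"
    #include "http_log.h"

    /* Hypothetical helper for a load balancing method that wants to add a
     * balancer member while the server is running. */
    static proxy_worker *grow_balancer(proxy_balancer *balancer,
                                       proxy_server_conf *conf,
                                       server_rec *s, const char *url)
    {
        proxy_worker *worker = NULL;
        const char *err = ap_proxy_add_worker(&worker, conf->pool, conf, url);
        if (err) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                         "proxy: can not add worker %s: %s", url, err);
            return NULL;
        }
        /* After this commit the new worker starts with cp == NULL and
         * mutex == NULL; they are created lazily by the initializer below
         * (or later by proxy_handler()) from the pool it is given. */
        ap_proxy_initialize_worker_share(conf, worker, s);
        ap_proxy_initialize_worker(worker, s, conf->pool);
        ap_proxy_add_worker_to_balancer(conf->pool, balancer, worker);
        return worker;
    }
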

diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 06867d3baa32e6ee2ec68a861020c41c5725270c..bc1ff522e35c97acefdd00b176ee89d0ec622ce9 100644
@@ -940,6 +940,13 @@ static int proxy_handler(request_rec *r)
                 balancer = NULL;
             goto cleanup;
         }
+
+        /* Initialize the worker if needed; note the shared area must be initialized by the balancer logic */
+        if (balancer) {
+            ap_proxy_initialize_worker(worker, r->server, conf->pool);
+            ap_proxy_initialize_worker_share(conf, worker, r->server);
+        }
+
         if (balancer && balancer->max_attempts_set && !max_attempts)
             max_attempts = balancer->max_attempts;
         /* firstly, try a proxy, unless a NoProxy directive is active */
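
The two new calls above ignore the status returned by
ap_proxy_initialize_worker(). A stricter, purely illustrative variant could
abort the request instead; access_status and the cleanup label are existing
locals of proxy_handler() that the committed hunk already relies on.

    /* Sketch only: fail the request if the lazy worker initialization fails,
     * instead of ignoring the returned status as the committed hunk does. */
    if (balancer) {
        apr_status_t rv = ap_proxy_initialize_worker(worker, r->server,
                                                     conf->pool);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                         "proxy: failed to initialize worker %s", worker->name);
            access_status = HTTP_INTERNAL_SERVER_ERROR;
            goto cleanup;
        }
        ap_proxy_initialize_worker_share(conf, worker, r->server);
    }
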
@@ -2295,7 +2302,7 @@ static void child_init(apr_pool_t *p, server_rec *s)
         worker = (proxy_worker *)conf->workers->elts;
         for (i = 0; i < conf->workers->nelts; i++) {
             ap_proxy_initialize_worker_share(conf, worker, s);
-            ap_proxy_initialize_worker(worker, s);
+            ap_proxy_initialize_worker(worker, s, p);
             worker++;
         }
         /* Create and initialize forward worker if defined */
@@ -2305,7 +2312,7 @@ static void child_init(apr_pool_t *p, server_rec *s)
             conf->forward->hostname = "*";
             conf->forward->scheme   = "*";
             ap_proxy_initialize_worker_share(conf, conf->forward, s);
-            ap_proxy_initialize_worker(conf->forward, s);
+            ap_proxy_initialize_worker(conf->forward, s, p);
             /* Do not disable worker in case of errors */
             conf->forward->s->status |= PROXY_WORKER_IGNORE_ERRORS;
             /* Disable address cache for generic forward worker */
@@ -2317,7 +2324,7 @@ static void child_init(apr_pool_t *p, server_rec *s)
             reverse->hostname = "*";
             reverse->scheme   = "*";
             ap_proxy_initialize_worker_share(conf, reverse, s);
-            ap_proxy_initialize_worker(reverse, s);
+            ap_proxy_initialize_worker(reverse, s, p);
             /* Do not disable worker in case of errors */
             reverse->s->status |= PROXY_WORKER_IGNORE_ERRORS;
             /* Disable address cache for generic reverse worker */
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 15390337f212fe635d9fac0099f114a98a7f971c..5de177725ee100a1823f369acaf875602b79a54b 100644
@@ -570,10 +570,12 @@ PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
  * Initialize the worker
  * @param worker worker to initialize
  * @param s      current server record
+ * @param p      memory pool used for the mutex and connection pool.
  * @return       APR_SUCCESS or error code
  */
 PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker,
-                                                       server_rec *s);
+                                                       server_rec *s,
+                                                       apr_pool_t *p);
 /**
  * Get the balancer from proxy configuration
  * @param p     memory pool used for finding balancer
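
For callers of the public API the only source change is the extra pool
argument; the pool chosen decides where the lazily created mutex and
connection pool live, e.g. the per-child pool in child_init() or conf->pool
when the balancer logic triggers the initialization (a before/after sketch):

    /* Before this commit: */
    ap_proxy_initialize_worker(worker, s);

    /* After this commit: pass the pool that should back the worker's
     * mutex and connection pool. */
    ap_proxy_initialize_worker(worker, s, p);
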
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index 5fdee9d2c4de126934b9ccdb225ea1e1bab55bf0..cce4795e3d989a76900763e49e16b32b1edcc67d 100644
@@ -104,7 +104,7 @@ static int init_balancer_members(proxy_server_conf *conf, server_rec *s,
             }
         }
         ap_proxy_initialize_worker_share(conf, *workers, s);
-        ap_proxy_initialize_worker(*workers, s);
+        ap_proxy_initialize_worker(*workers, s, conf->pool);
         if (!worker_is_initialized) {
             /* Set to the original configuration */
             (*workers)->s->lbstatus = (*workers)->s->lbfactor =
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index beb176faa19400e3a4c19962332733b715d2298f..9984260f349c54e335ea6c5705d453134bc52173 100644
@@ -1443,14 +1443,8 @@ PROXY_DECLARE(const char *) ap_proxy_add_worker(proxy_worker **worker,
     (*worker)->smax = -1;
     /* Increase the total worker count */
     proxy_lb_workers++;
-    init_conn_pool(p, *worker);
-#if APR_HAS_THREADS
-    if (apr_thread_mutex_create(&((*worker)->mutex),
-                APR_THREAD_MUTEX_DEFAULT, p) != APR_SUCCESS) {
-        /* XXX: Do we need to log something here */
-        return "can not create thread mutex";
-    }
-#endif
+    (*worker)->cp = NULL;
+    (*worker)->mutex = NULL;
 
     return NULL;
 }
@@ -1464,7 +1458,8 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker(apr_pool_t *p)
     worker->smax = -1;
     /* Increase the total worker count */
     proxy_lb_workers++;
-    init_conn_pool(p, worker);
+    worker->cp = NULL;
+    worker->mutex = NULL;
 
     return worker;
 }
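
Because cp and mutex now start out NULL, a worker built by hand (for instance
from a load balancing method) carries no runtime resources until it is
initialized. A small hand-wired sketch, assuming a made-up backend host and a
helper name that is not part of mod_proxy:

    #include "mod_proxy.h"

    /* Illustrative only: hand-build a worker, then let the initializer
     * create its mutex and connection pool from the supplied pool. */
    static proxy_worker *make_backend_worker(proxy_server_conf *conf,
                                             server_rec *s, apr_pool_t *p)
    {
        proxy_worker *w = ap_proxy_create_worker(p);
        w->name     = "http://backend.example.com";
        w->scheme   = "http";
        w->hostname = "backend.example.com";
        w->port     = 80;
        /* At this point w->cp == NULL and w->mutex == NULL. */
        ap_proxy_initialize_worker_share(conf, w, s); /* shared status slot */
        ap_proxy_initialize_worker(w, s, p);          /* creates cp and mutex */
        return w;
    }
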
@@ -1839,7 +1834,7 @@ PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
 
 }
 
-PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s)
+PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)
 {
     apr_status_t rv;
 
@@ -1864,7 +1859,24 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, ser
         worker->is_address_reusable = 1;
     }
 
+    if (worker->cp == NULL)
+        init_conn_pool(p, worker);
+    if (worker->cp == NULL) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+            "can not create connection pool");
+        return APR_EGENERAL;
+    }
+
 #if APR_HAS_THREADS
+    if (worker->mutex == NULL) {
+        rv = apr_thread_mutex_create(&(worker->mutex), APR_THREAD_MUTEX_DEFAULT, p);
+        if (rv != APR_SUCCESS) {
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+                "can not create thread mutex");
+            return rv;
+        }
+    }
+
     ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
     if (mpm_threads > 1) {
         /* Set hard max to no more than mpm_threads */