Merge r1828890, r1832500 from trunk:
author     Jim Jagielski <jim@apache.org>
           Thu, 31 May 2018 13:24:04 +0000 (13:24 +0000)
committer  Jim Jagielski <jim@apache.org>
           Thu, 31 May 2018 13:24:04 +0000 (13:24 +0000)
mod_proxy_balancer: Add hot spare member type and corresponding flag (R). Hot spare members are
used as drop-in replacements for unusable workers in the same load balancer set. This differs
from hot standbys which are only used when all workers in a set are unusable. PR 61140.

mod_proxy_balancer: follow up to r1828890: indentation and 80 col.

Submitted by: jhriggs, ylavic
Reviewed by: jhriggs, jim, ylavic

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1832609 13f79535-47bb-0310-9956-ffa450edef68

CHANGES
STATUS
docs/manual/howto/reverse_proxy.xml
docs/manual/mod/mod_proxy.xml
modules/proxy/balancers/mod_lbmethod_bybusyness.c
modules/proxy/balancers/mod_lbmethod_byrequests.c
modules/proxy/balancers/mod_lbmethod_bytraffic.c
modules/proxy/mod_proxy.c
modules/proxy/mod_proxy.h
modules/proxy/mod_proxy_balancer.c
modules/proxy/proxy_util.c
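
Besides the user-visible flag, this merge refactors the stock lbmethods onto a
common selection helper, ap_proxy_balancer_get_best_worker() (declared in
mod_proxy.h below), so that lbset, spare, and standby handling lives in one
place. As a minimal sketch of what a custom lbmethod reduces to under this
API, assuming only the declarations in this diff (the "least busy, unweighted"
policy and the function names are hypothetical, not part of the commit):

    #include "mod_proxy.h"

    /* Hypothetical ranking callback: prefer the worker with the fewest
     * in-flight requests, ignoring lbfactor weighting entirely. */
    static int is_best_leastbusy(proxy_worker *current, proxy_worker *prev_best,
                                 void *baton)
    {
        (void)baton;  /* this policy carries no per-request state */
        return !prev_best || current->s->busy < prev_best->s->busy;
    }

    static proxy_worker *find_best_leastbusy(proxy_balancer *balancer,
                                             request_rec *r)
    {
        /* The helper walks lbsets from lowest to highest, substitutes usable
         * hot spares (+R) for unusable members of the same set, and falls back
         * to hot standbys (+H) only when nothing else in the set is usable;
         * the callback only ranks the workers it is offered. */
        return ap_proxy_balancer_get_best_worker(balancer, r, is_best_leastbusy,
                                                 NULL);
    }

The baton argument is how the stock methods below thread per-request state
through the walk; byrequests and bybusyness, for example, pass &total_factor
so the selected worker's lbstatus can be rebalanced after selection.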

diff --git a/CHANGES b/CHANGES
index 780f729d302add246f073a97df74762411079919..07ede5133af32cc7f591ca2b62d1653179a54d07 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,11 @@
                                                          -*- coding: utf-8 -*-
 Changes with Apache 2.4.34
 
+  *) mod_proxy_balancer: Add hot spare member type and corresponding flag (R).
+     Hot spare members are used as drop-in replacements for unusable workers
+     in the same load balancer set. This differs from hot standbys which are
+     only used when all workers in a set are unusable. PR 61140. [Jim Riggs]
+
   *) suexec: Add --enable-suexec-capabilites support on Linux, to use
      setuid/setgid capability bits rather than a setuid root binary.
      [Joe Orton]
@@ -5789,4 +5794,3 @@ Changes with Apache 2.2.x and later:
 Changes with Apache 2.0.x and later:
 
   *) http://svn.apache.org/viewvc/httpd/httpd/branches/2.0.x/CHANGES?view=markup
-
diff --git a/STATUS b/STATUS
index fdae7eb119e161fb807439b2fd2e9ff788920d75..0d7d5b8e6e4054590af97b797b1704c49e0f80e6 100644 (file)
--- a/STATUS
+++ b/STATUS
@@ -155,15 +155,6 @@ PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
      jailletc36: There should be a compatibility note for the new directives.
                  (done in r1828478)
 
-  *) mod_proxy_balancer: Add hot spare member type and corresponding flag (R). Hot spare members are
-     used as drop-in replacements for unusable workers in the same load balancer set. This differs
-     from hot standbys which are only used when all workers in a set are unusable. PR 61140.
-     trunk patch: https://svn.apache.org/r1828890
-     2.4.x patch: https://svn.apache.org/repos/asf/httpd/httpd/patches/2.4.x/hot-spare-2.4.patch
-     +1: jhriggs, jim
-     ylavic: +1 with r1832500.
-
-
 PATCHES PROPOSED TO BACKPORT FROM TRUNK:
   [ New proposals should be added at the end of the list ]
 
diff --git a/docs/manual/howto/reverse_proxy.xml b/docs/manual/howto/reverse_proxy.xml
index ca986d36ebe3682e1d1503ed90714c1f915cf380..cef737cab36ee69e12ae932edc35ad98c8db1cbb 100644 (file)
--- a/docs/manual/howto/reverse_proxy.xml
+++ b/docs/manual/howto/reverse_proxy.xml
@@ -182,20 +182,41 @@ ProxyPassReverse "/images"  "balancer://myset/"
     <title>Failover</title>
 
     <p>
-      You can also fine-tune various failover scenarios, detailing which
-      workers and even which balancers should accessed in such cases. For
-      example, the below setup implements 2 failover cases: In the first,
-      <code>http://hstandby.example.com:8080</code> is only sent traffic
-      if all other workers in the <em>myset</em> balancer are not available.
-      If that worker itself is not available, only then will the
-      <code>http://bkup1.example.com:8080</code> and <code>http://bkup2.example.com:8080</code>
-      workers be brought into rotation:
+      You can also fine-tune various failover scenarios, detailing which workers
+      and even which balancers should be accessed in such cases. For example, the
+      below setup implements three failover cases:
+    </p>
+    <ol>
+      <li>
+        <code>http://spare1.example.com:8080</code> and
+        <code>http://spare2.example.com:8080</code> are only sent traffic if one
+        or both of <code>http://www2.example.com:8080</code> or
+        <code>http://www3.example.com:8080</code> is unavailable. (One spare
+        will be used to replace one unusable member of the same balancer set.)
+      </li>
+      <li>
+        <code>http://hstandby.example.com:8080</code> is only sent traffic if
+        all other workers in balancer set <code>0</code> are not available.
+      </li>
+      <li>
+        If all load balancer set <code>0</code> workers, spares, and the standby
+        are unavailable, only then will the
+        <code>http://bkup1.example.com:8080</code> and
+        <code>http://bkup2.example.com:8080</code> workers from balancer set
+        <code>1</code> be brought into rotation.
+      </li>
+    </ol>
+    <p>
+      Thus, it is possible to have one or more hot spares and hot standbys for
+      each load balancer set.
     </p>
 
     <highlight language="config">
 &lt;Proxy balancer://myset&gt;
     BalancerMember http://www2.example.com:8080
     BalancerMember http://www3.example.com:8080 loadfactor=3 timeout=1
+    BalancerMember http://spare1.example.com:8080 status=+R
+    BalancerMember http://spare2.example.com:8080 status=+R
     BalancerMember http://hstandby.example.com:8080 status=+H
     BalancerMember http://bkup1.example.com:8080 lbset=1
     BalancerMember http://bkup2.example.com:8080 lbset=1
@@ -207,11 +228,12 @@ ProxyPassReverse "/images/"  "balancer://myset/"
     </highlight>
 
     <p>
-      The magic of this failover setup is setting <code>http://hstandby.example.com:8080</code>
-      with the <code>+H</code> status flag, which puts it in <em>hot standby</em> mode,
-      and making the 2 <code>bkup#</code> servers part of the #1 load balancer set (the
-      default set is 0); for failover, hot standbys (if they exist) are used 1st, when all regular
-      workers are unavailable; load balancer sets are always tried lowest number first.
+      For failover, hot spares are used as replacements for unusable workers in
+      the same load balancer set. A worker is considered unusable if it is
+      draining, stopped, or otherwise in an error/failed state. Hot standbys are
+      used if all workers and spares in the load balancer set are
+      unavailable. Load balancer sets (with their respective hot spares and
+      standbys) are always tried in order from lowest to highest.
     </p>
 
   </section>
@@ -301,8 +323,12 @@ ProxyPassReverse "/images/"  "balancer://myset/"
         <tr><td><code>S</code></td><td><em>Stop</em></td><td>Worker is administratively stopped; will not accept requests
                     and will not be automatically retried</td></tr>
         <tr><td><code>I</code></td><td><em>Ign</em></td><td>Worker is in ignore-errors mode and will always be considered available.</td></tr>
+        <tr><td><code>R</code></td><td><em>Spar</em></td><td>Worker is a hot spare. For each worker in a given lbset that is unusable
+                    (draining, stopped, in error, etc.), a usable hot spare with the same lbset will be used in
+                    its place. Hot spares can help ensure that a specific number of workers are always available
+                    for use by a balancer.</td></tr>
         <tr><td><code>H</code></td><td><em>Stby</em></td><td>Worker is in hot-standby mode and will only be used if no other
-                    viable workers are available.</td></tr>
+                    viable workers or spares are available in the balancer set.</td></tr>
         <tr><td><code>E</code></td><td><em>Err</em></td><td>Worker is in an error state, usually due to failing pre-request check;
                     requests will not be proxied to this worker, but it will be retried depending on
                     the <code>retry</code> setting of the worker.</td></tr>
diff --git a/docs/manual/mod/mod_proxy.xml b/docs/manual/mod/mod_proxy.xml
index b16a2cc83ea55d43ab082722b0b2c11af4647856..1f0c4e127a29b0c0e3ccdd84d3bbbb3c31a21a79 100644 (file)
--- a/docs/manual/mod/mod_proxy.xml
+++ b/docs/manual/mod/mod_proxy.xml
@@ -1186,8 +1186,12 @@ ProxyPass "/mirror/foo/i" "!"
          <tr><td>D: Worker is disabled and will not accept any requests.</td></tr>
          <tr><td>S: Worker is administratively stopped.</td></tr>
          <tr><td>I: Worker is in ignore-errors mode and will always be considered available.</td></tr>
+         <tr><td>R: Worker is a hot spare. For each worker in a given lbset that is unusable
+                    (draining, stopped, in error, etc.), a usable hot spare with the same lbset will be used in
+                    its place. Hot spares can help ensure that a specific number of workers are always available
+                    for use by a balancer.</td></tr>
          <tr><td>H: Worker is in hot-standby mode and will only be used if no other
-                    viable workers are available.</td></tr>
+                    viable workers or spares are available in the balancer set.</td></tr>
          <tr><td>E: Worker is in an error state.</td></tr>
          <tr><td>N: Worker is in drain mode and will only accept existing sticky sessions
                     destined for itself and ignore all other requests.</td></tr>
@@ -1342,8 +1346,24 @@ ProxyPass "/" "balancer://mycluster/" stickysession=JSESSIONID|jsessionid nofail
 &lt;/Proxy&gt;
     </highlight>
 
+    <p>Configuring hot spares can help ensure that a certain number of
+    workers are always available for use per load balancer set:</p>
+    <highlight language="config">
+ProxyPass "/" "balancer://sparecluster/"
+&lt;Proxy balancer://sparecluster&gt;
+    BalancerMember ajp://1.2.3.4:8009
+    BalancerMember ajp://1.2.3.5:8009
+    # The servers below are hot spares. For each server above that is unusable
+    # (draining, stopped, unreachable, in error state, etc.), one of these spares
+    # will be used in its place. Two servers will always be available for a request
+    # unless one or more of the spares is also unusable.
+    BalancerMember ajp://1.2.3.6:8009 status=+R
+    BalancerMember ajp://1.2.3.7:8009 status=+R
+&lt;/Proxy&gt;
+    </highlight>
+
     <p>Setting up a hot-standby that will only be used if no other
-     members are available:</p>
+    members (or spares) are available in the load balancer set:</p>
     <highlight language="config">
 ProxyPass "/" "balancer://hotcluster/"
 &lt;Proxy "balancer://hotcluster"&gt;
diff --git a/modules/proxy/balancers/mod_lbmethod_bybusyness.c b/modules/proxy/balancers/mod_lbmethod_bybusyness.c
index 30a8f55429c6224bbf745c8c91036ff474cb8361..709512bb5ee76906de0fcad7c50f68f4ce1ef9e4 100644 (file)
--- a/modules/proxy/balancers/mod_lbmethod_bybusyness.c
+++ b/modules/proxy/balancers/mod_lbmethod_bybusyness.c
 
 module AP_MODULE_DECLARE_DATA lbmethod_bybusyness_module;
 
-static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
-        proxy_worker *worker, server_rec *s) = NULL;
+static int is_best_bybusyness(proxy_worker *current, proxy_worker *prev_best, void *baton)
+{
+    int *total_factor = (int *)baton;
+
+    current->s->lbstatus += current->s->lbfactor;
+    *total_factor += current->s->lbfactor;
+
+    return (
+        !prev_best
+        || (current->s->busy < prev_best->s->busy)
+        || (
+            (current->s->busy == prev_best->s->busy)
+            && (current->s->lbstatus > prev_best->s->lbstatus)
+        )
+    );
+}
 
 static proxy_worker *find_best_bybusyness(proxy_balancer *balancer,
-                                request_rec *r)
+                                          request_rec *r)
 {
-    int i;
-    proxy_worker **worker;
-    proxy_worker *mycandidate = NULL;
-    int cur_lbset = 0;
-    int max_lbset = 0;
-    int checking_standby;
-    int checked_standby;
-
     int total_factor = 0;
+    proxy_worker *worker =
+        ap_proxy_balancer_get_best_worker(balancer, r, is_best_bybusyness,
+                                          &total_factor);
 
-    if (!ap_proxy_retry_worker_fn) {
-        ap_proxy_retry_worker_fn =
-                APR_RETRIEVE_OPTIONAL_FN(ap_proxy_retry_worker);
-        if (!ap_proxy_retry_worker_fn) {
-            /* can only happen if mod_proxy isn't loaded */
-            return NULL;
-        }
-    }
-
-    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01211)
-                 "proxy: Entering bybusyness for BALANCER (%s)",
-                 balancer->s->name);
-
-    /* First try to see if we have available candidate */
-    do {
-
-        checking_standby = checked_standby = 0;
-        while (!mycandidate && !checked_standby) {
-
-            worker = (proxy_worker **)balancer->workers->elts;
-            for (i = 0; i < balancer->workers->nelts; i++, worker++) {
-                if  (!checking_standby) {    /* first time through */
-                    if ((*worker)->s->lbset > max_lbset)
-                        max_lbset = (*worker)->s->lbset;
-                }
-                if (
-                    ((*worker)->s->lbset != cur_lbset) ||
-                    (checking_standby ? !PROXY_WORKER_IS_STANDBY(*worker) : PROXY_WORKER_IS_STANDBY(*worker)) ||
-                    (PROXY_WORKER_IS_DRAINING(*worker))
-                    ) {
-                    continue;
-                }
-
-                /* If the worker is in error state run
-                 * retry on that worker. It will be marked as
-                 * operational if the retry timeout is elapsed.
-                 * The worker might still be unusable, but we try
-                 * anyway.
-                 */
-                if (!PROXY_WORKER_IS_USABLE(*worker)) {
-                    ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
-                }
-
-                /* Take into calculation only the workers that are
-                 * not in error state or not disabled.
-                 */
-                if (PROXY_WORKER_IS_USABLE(*worker)) {
-
-                    (*worker)->s->lbstatus += (*worker)->s->lbfactor;
-                    total_factor += (*worker)->s->lbfactor;
-
-                    if (!mycandidate
-                        || (*worker)->s->busy < mycandidate->s->busy
-                        || ((*worker)->s->busy == mycandidate->s->busy && (*worker)->s->lbstatus > mycandidate->s->lbstatus))
-                        mycandidate = *worker;
-
-                }
-
-            }
-
-            checked_standby = checking_standby++;
-
-        }
-
-        cur_lbset++;
-
-    } while (cur_lbset <= max_lbset && !mycandidate);
-
-    if (mycandidate) {
-        mycandidate->s->lbstatus -= total_factor;
-        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01212)
-                     "proxy: bybusyness selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
-                     mycandidate->s->name, mycandidate->s->busy, mycandidate->s->lbstatus);
-
+    if (worker) {
+        worker->s->lbstatus -= total_factor;
     }
 
-    return mycandidate;
+    return worker;
 }
 
 /* assumed to be mutex protected by caller */
diff --git a/modules/proxy/balancers/mod_lbmethod_byrequests.c b/modules/proxy/balancers/mod_lbmethod_byrequests.c
index 83424cefed90d26ea9ab3e80b9f5facd364d7cfb..0483a70fecb712de392783f91e46e3ee3cd129d9 100644 (file)
--- a/modules/proxy/balancers/mod_lbmethod_byrequests.c
+++ b/modules/proxy/balancers/mod_lbmethod_byrequests.c
 
 module AP_MODULE_DECLARE_DATA lbmethod_byrequests_module;
 
-static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
-        proxy_worker *worker, server_rec *s) = NULL;
+static int is_best_byrequests(proxy_worker *current, proxy_worker *prev_best, void *baton)
+{
+    int *total_factor = (int *)baton;
+
+    current->s->lbstatus += current->s->lbfactor;
+    *total_factor += current->s->lbfactor;
+
+    return (!prev_best || (current->s->lbstatus > prev_best->s->lbstatus));
+}
 
 /*
  * The idea behind the find_best_byrequests scheduler is the following:
@@ -70,82 +77,17 @@ static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
  *   b a d c d a c d b d ...
  *
  */
-
 static proxy_worker *find_best_byrequests(proxy_balancer *balancer,
                                 request_rec *r)
 {
-    int i;
     int total_factor = 0;
-    proxy_worker **worker;
-    proxy_worker *mycandidate = NULL;
-    int cur_lbset = 0;
-    int max_lbset = 0;
-    int checking_standby;
-    int checked_standby;
-
-    if (!ap_proxy_retry_worker_fn) {
-        ap_proxy_retry_worker_fn =
-                APR_RETRIEVE_OPTIONAL_FN(ap_proxy_retry_worker);
-        if (!ap_proxy_retry_worker_fn) {
-            /* can only happen if mod_proxy isn't loaded */
-            return NULL;
-        }
-    }
-
-    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01207)
-                 "proxy: Entering byrequests for BALANCER (%s)",
-                 balancer->s->name);
-
-    /* First try to see if we have available candidate */
-    do {
-        checking_standby = checked_standby = 0;
-        while (!mycandidate && !checked_standby) {
-            worker = (proxy_worker **)balancer->workers->elts;
-            for (i = 0; i < balancer->workers->nelts; i++, worker++) {
-                if (!checking_standby) {    /* first time through */
-                    if ((*worker)->s->lbset > max_lbset)
-                        max_lbset = (*worker)->s->lbset;
-                }
-                if (
-                    ((*worker)->s->lbset != cur_lbset) ||
-                    (checking_standby ? !PROXY_WORKER_IS_STANDBY(*worker) : PROXY_WORKER_IS_STANDBY(*worker)) ||
-                    (PROXY_WORKER_IS_DRAINING(*worker))
-                    ) {
-                    continue;
-                }
-
-                /* If the worker is in error state run
-                 * retry on that worker. It will be marked as
-                 * operational if the retry timeout is elapsed.
-                 * The worker might still be unusable, but we try
-                 * anyway.
-                 */
-                if (!PROXY_WORKER_IS_USABLE(*worker))
-                    ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
-                /* Take into calculation only the workers that are
-                 * not in error state or not disabled.
-                 */
-                if (PROXY_WORKER_IS_USABLE(*worker)) {
-                    (*worker)->s->lbstatus += (*worker)->s->lbfactor;
-                    total_factor += (*worker)->s->lbfactor;
-                    if (!mycandidate || (*worker)->s->lbstatus > mycandidate->s->lbstatus)
-                        mycandidate = *worker;
-                }
-            }
-            checked_standby = checking_standby++;
-        }
-        cur_lbset++;
-    } while (cur_lbset <= max_lbset && !mycandidate);
-
-    if (mycandidate) {
-        mycandidate->s->lbstatus -= total_factor;
-        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01208)
-                     "proxy: byrequests selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
-                     mycandidate->s->name, mycandidate->s->busy, mycandidate->s->lbstatus);
+    proxy_worker *worker = ap_proxy_balancer_get_best_worker(balancer, r, is_best_byrequests, &total_factor);
 
+    if (worker) {
+        worker->s->lbstatus -= total_factor;
     }
 
-    return mycandidate;
+    return worker;
 }
 
 /* assumed to be mutex protected by caller */
diff --git a/modules/proxy/balancers/mod_lbmethod_bytraffic.c b/modules/proxy/balancers/mod_lbmethod_bytraffic.c
index 6cf2478de55b3d7865f5dfa2b6a652aad5615b97..343c59ac32a510dab1e5bd9ccc905182007fd110 100644 (file)
--- a/modules/proxy/balancers/mod_lbmethod_bytraffic.c
+++ b/modules/proxy/balancers/mod_lbmethod_bytraffic.c
 
 module AP_MODULE_DECLARE_DATA lbmethod_bytraffic_module;
 
-static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
-        proxy_worker *worker, server_rec *s) = NULL;
+static int is_best_bytraffic(proxy_worker *current, proxy_worker *prev_best, void *baton)
+{
+    apr_off_t *min_traffic = (apr_off_t *)baton;
+    apr_off_t traffic = (current->s->transferred / current->s->lbfactor)
+                        + (current->s->read / current->s->lbfactor);
+
+    if (!prev_best || (traffic < *min_traffic)) {
+        *min_traffic = traffic;
+
+        return TRUE;
+    }
+
+    return FALSE;
+}
 
 /*
  * The idea behind the find_best_bytraffic scheduler is the following:
@@ -45,79 +57,10 @@ static int (*ap_proxy_retry_worker_fn)(const char *proxy_function,
 static proxy_worker *find_best_bytraffic(proxy_balancer *balancer,
                                          request_rec *r)
 {
-    int i;
-    apr_off_t mytraffic = 0;
-    apr_off_t curmin = 0;
-    proxy_worker **worker;
-    proxy_worker *mycandidate = NULL;
-    int cur_lbset = 0;
-    int max_lbset = 0;
-    int checking_standby;
-    int checked_standby;
-
-    if (!ap_proxy_retry_worker_fn) {
-        ap_proxy_retry_worker_fn =
-                APR_RETRIEVE_OPTIONAL_FN(ap_proxy_retry_worker);
-        if (!ap_proxy_retry_worker_fn) {
-            /* can only happen if mod_proxy isn't loaded */
-            return NULL;
-        }
-    }
-
-    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01209)
-                 "proxy: Entering bytraffic for BALANCER (%s)",
-                 balancer->s->name);
-
-    /* First try to see if we have available candidate */
-    do {
-        checking_standby = checked_standby = 0;
-        while (!mycandidate && !checked_standby) {
-            worker = (proxy_worker **)balancer->workers->elts;
-            for (i = 0; i < balancer->workers->nelts; i++, worker++) {
-                if (!checking_standby) {    /* first time through */
-                    if ((*worker)->s->lbset > max_lbset)
-                        max_lbset = (*worker)->s->lbset;
-                }
-                if (
-                    ((*worker)->s->lbset != cur_lbset) ||
-                    (checking_standby ? !PROXY_WORKER_IS_STANDBY(*worker) : PROXY_WORKER_IS_STANDBY(*worker)) ||
-                    (PROXY_WORKER_IS_DRAINING(*worker))
-                    ) {
-                    continue;
-                }
-
-                /* If the worker is in error state run
-                 * retry on that worker. It will be marked as
-                 * operational if the retry timeout is elapsed.
-                 * The worker might still be unusable, but we try
-                 * anyway.
-                 */
-                if (!PROXY_WORKER_IS_USABLE(*worker))
-                    ap_proxy_retry_worker_fn("BALANCER", *worker, r->server);
-                /* Take into calculation only the workers that are
-                 * not in error state or not disabled.
-                 */
-                if (PROXY_WORKER_IS_USABLE(*worker)) {
-                    mytraffic = ((*worker)->s->transferred/(*worker)->s->lbfactor) +
-                                ((*worker)->s->read/(*worker)->s->lbfactor);
-                    if (!mycandidate || mytraffic < curmin) {
-                        mycandidate = *worker;
-                        curmin = mytraffic;
-                    }
-                }
-            }
-            checked_standby = checking_standby++;
-        }
-        cur_lbset++;
-    } while (cur_lbset <= max_lbset && !mycandidate);
-
-    if (mycandidate) {
-        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(01210)
-                     "proxy: bytraffic selected worker \"%s\" : busy %" APR_SIZE_T_FMT,
-                     mycandidate->s->name, mycandidate->s->busy);
-    }
+    apr_off_t min_traffic = 0;
 
-    return mycandidate;
+    return ap_proxy_balancer_get_best_worker(balancer, r, is_best_bytraffic,
+                                             &min_traffic);
 }
 
 /* assumed to be mutex protected by caller */
diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index e00a82dffa7445edea4828cd75372e4fcd6f9d92..bc3410c45aecb1026f1f70d9fdd54e2a8beebb88 100644 (file)
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -68,6 +68,7 @@ proxy_wstat_t PROXY_DECLARE_DATA proxy_wstat_tbl[] = {
     {PROXY_WORKER_STOPPED,       PROXY_WORKER_STOPPED_FLAG,       "Stop "},
     {PROXY_WORKER_IN_ERROR,      PROXY_WORKER_IN_ERROR_FLAG,      "Err "},
     {PROXY_WORKER_HOT_STANDBY,   PROXY_WORKER_HOT_STANDBY_FLAG,   "Stby "},
+    {PROXY_WORKER_HOT_SPARE,     PROXY_WORKER_HOT_SPARE_FLAG,     "Spar "},
     {PROXY_WORKER_FREE,          PROXY_WORKER_FREE_FLAG,          "Free "},
     {PROXY_WORKER_HC_FAIL,       PROXY_WORKER_HC_FAIL_FLAG,       "HcFl "},
     {0x0, '\0', NULL}
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index bfa57602048273f4a04826eff7c482f2fa961662..63c2195ffe538c23e7d2f2d47ef188d11bb5607d 100644 (file)
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -307,6 +307,7 @@ struct proxy_conn_pool {
 #define PROXY_WORKER_HOT_STANDBY    0x0100
 #define PROXY_WORKER_FREE           0x0200
 #define PROXY_WORKER_HC_FAIL        0x0400
+#define PROXY_WORKER_HOT_SPARE      0x0800
 
 /* worker status flags */
 #define PROXY_WORKER_INITIALIZED_FLAG    'O'
@@ -320,6 +321,7 @@ struct proxy_conn_pool {
 #define PROXY_WORKER_HOT_STANDBY_FLAG    'H'
 #define PROXY_WORKER_FREE_FLAG           'F'
 #define PROXY_WORKER_HC_FAIL_FLAG        'C'
+#define PROXY_WORKER_HOT_SPARE_FLAG      'R'
 
 #define PROXY_WORKER_NOT_USABLE_BITMAP ( PROXY_WORKER_IN_SHUTDOWN | \
 PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR | \
@@ -330,6 +332,8 @@ PROXY_WORKER_HC_FAIL )
 
 #define PROXY_WORKER_IS_STANDBY(f)   ( (f)->s->status &  PROXY_WORKER_HOT_STANDBY )
 
+#define PROXY_WORKER_IS_SPARE(f)   ( (f)->s->status &  PROXY_WORKER_HOT_SPARE )
+
 #define PROXY_WORKER_IS_USABLE(f)   ( ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
   PROXY_WORKER_IS_INITIALIZED(f) )
 
@@ -824,6 +828,23 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
                                                          server_rec *s,
                                                          apr_pool_t *p);
 
+typedef int (proxy_is_best_callback_fn_t)(proxy_worker *current, proxy_worker *prev_best, void *baton);
+
+/**
+ * Retrieve the best worker in a balancer for the current request
+ * @param balancer balancer for which to find the best worker
+ * @param r        current request record
+ * @param is_best  a callback function provided by the lbmethod
+ *                 that determines if the current worker is best
+ * @param baton    an lbmethod-specific context pointer (baton)
+ *                 passed to the is_best callback
+ * @return         the best worker to be used for the request
+ */
+PROXY_DECLARE(proxy_worker *) ap_proxy_balancer_get_best_worker(proxy_balancer *balancer,
+                                                                request_rec *r,
+                                                                proxy_is_best_callback_fn_t *is_best,
+                                                                void *baton);
+
 /**
  * Find the shm of the worker as needed
  * @param storage slotmem provider
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index 9605dc04b8b77d72126b086a4c18b203b63b151c..7c12f403164f4a4e701934aac8356bbc787b398c 100644 (file)
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -1237,6 +1237,9 @@ static int balancer_handler(request_rec *r)
         if ((val = apr_table_get(params, "w_status_H"))) {
             ap_proxy_set_wstatus(PROXY_WORKER_HOT_STANDBY_FLAG, atoi(val), wsel);
         }
+        if ((val = apr_table_get(params, "w_status_R"))) {
+            ap_proxy_set_wstatus(PROXY_WORKER_HOT_SPARE_FLAG, atoi(val), wsel);
+        }
         if ((val = apr_table_get(params, "w_status_S"))) {
             ap_proxy_set_wstatus(PROXY_WORKER_STOPPED_FLAG, atoi(val), wsel);
         }
@@ -1763,7 +1766,8 @@ static int balancer_handler(request_rec *r)
                      "<th>Ignore Errors</th>"
                      "<th>Draining Mode</th>"
                      "<th>Disabled</th>"
-                     "<th>Hot Standby</th>", r);
+                     "<th>Hot Standby</th>"
+                     "<th>Hot Spare</th>", r);
             if (hc_show_exprs_f) {
                 ap_rputs("<th>HC Fail</th>", r);
             }
@@ -1772,6 +1776,7 @@ static int balancer_handler(request_rec *r)
             create_radio("w_status_N", (PROXY_WORKER_IS(wsel, PROXY_WORKER_DRAIN)), r);
             create_radio("w_status_D", (PROXY_WORKER_IS(wsel, PROXY_WORKER_DISABLED)), r);
             create_radio("w_status_H", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HOT_STANDBY)), r);
+            create_radio("w_status_R", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HOT_SPARE)), r);
             if (hc_show_exprs_f) {
                 create_radio("w_status_C", (PROXY_WORKER_IS(wsel, PROXY_WORKER_HC_FAIL)), r);
             }
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index e848d313986eef434773f7cd5f6b46c242d97574..dba06b5678b06108a7350ca04479395b7674154a 100644 (file)
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -68,6 +68,7 @@ static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r);
 static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r);
 static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r);
 static int proxy_match_word(struct dirconn_entry *This, request_rec *r);
+static int ap_proxy_retry_worker(const char *proxy_function, proxy_worker *worker, server_rec *s);
 
 APR_IMPLEMENT_OPTIONAL_HOOK_RUN_ALL(proxy, PROXY, int, create_req,
                                    (request_rec *r, request_rec *pr), (r, pr),
@@ -1300,6 +1301,121 @@ PROXY_DECLARE(apr_status_t) ap_proxy_initialize_balancer(proxy_balancer *balance
     return APR_SUCCESS;
 }
 
+PROXY_DECLARE(proxy_worker *) ap_proxy_balancer_get_best_worker(proxy_balancer *balancer,
+                                                                request_rec *r,
+                                                                proxy_is_best_callback_fn_t *is_best,
+                                                                void *baton)
+{
+    int i = 0;
+    int cur_lbset = 0;
+    int max_lbset = 0;
+    int unusable_workers = 0;
+    apr_pool_t *tpool = NULL;
+    apr_array_header_t *spares = NULL;
+    apr_array_header_t *standbys = NULL;
+    proxy_worker *worker = NULL;
+    proxy_worker *best_worker = NULL;
+
+    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(10122)
+                 "proxy: Entering %s for BALANCER (%s)",
+                 balancer->lbmethod->name, balancer->s->name);
+
+    apr_pool_create(&tpool, r->pool);
+
+    spares = apr_array_make(tpool, 1, sizeof(proxy_worker*));
+    standbys = apr_array_make(tpool, 1, sizeof(proxy_worker*));
+
+    /* Process lbsets in order, only replacing unusable workers in a given lbset
+     * with available spares from the same lbset. Hot standbys will be used as a
+     * last resort when all other workers and spares are unavailable.
+     */
+    for (cur_lbset = 0; !best_worker && (cur_lbset <= max_lbset); cur_lbset++) {
+        unusable_workers = 0;
+        apr_array_clear(spares);
+        apr_array_clear(standbys);
+
+        for (i = 0; i < balancer->workers->nelts; i++) {
+            worker = APR_ARRAY_IDX(balancer->workers, i, proxy_worker *);
+
+            if (worker->s->lbset > max_lbset) {
+                max_lbset = worker->s->lbset;
+            }
+
+            if (worker->s->lbset != cur_lbset) {
+                continue;
+            }
+
+            /* A draining worker that is neither a spare nor a standby should be
+             * considered unusable to be replaced by spares.
+             */
+            if (PROXY_WORKER_IS_DRAINING(worker)) {
+                if (!PROXY_WORKER_IS_SPARE(worker) && !PROXY_WORKER_IS_STANDBY(worker)) {
+                    unusable_workers++;
+                }
+
+                continue;
+            }
+
+            /* If the worker is in error state run retry on that worker. It will
+             * be marked as operational if the retry timeout is elapsed.  The
+             * worker might still be unusable, but we try anyway.
+             */
+            if (!PROXY_WORKER_IS_USABLE(worker)) {
+                ap_proxy_retry_worker("BALANCER", worker, r->server);
+            }
+
+            if (PROXY_WORKER_IS_SPARE(worker)) {
+                if (PROXY_WORKER_IS_USABLE(worker)) {
+                    APR_ARRAY_PUSH(spares, proxy_worker *) = worker;
+                }
+            }
+            else if (PROXY_WORKER_IS_STANDBY(worker)) {
+                if (PROXY_WORKER_IS_USABLE(worker)) {
+                    APR_ARRAY_PUSH(standbys, proxy_worker *) = worker;
+                }
+            }
+            else if (PROXY_WORKER_IS_USABLE(worker)) {
+              if (is_best(worker, best_worker, baton)) {
+                best_worker = worker;
+              }
+            }
+            else {
+                unusable_workers++;
+            }
+        }
+
+        /* Check if any spares are best. */
+        for (i = 0; (i < spares->nelts) && (i < unusable_workers); i++) {
+          worker = APR_ARRAY_IDX(spares, i, proxy_worker *);
+
+          if (is_best(worker, best_worker, baton)) {
+            best_worker = worker;
+          }
+        }
+
+        /* If no workers are available, use the standbys. */
+        if (!best_worker) {
+            for (i = 0; i < standbys->nelts; i++) {
+              worker = APR_ARRAY_IDX(standbys, i, proxy_worker *);
+
+              if (is_best(worker, best_worker, baton)) {
+                best_worker = worker;
+              }
+            }
+        }
+    }
+
+    apr_pool_destroy(tpool);
+
+    if (best_worker) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, APLOGNO(10123)
+                     "proxy: %s selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
+                     balancer->lbmethod->name, best_worker->s->name, best_worker->s->busy, best_worker->s->lbstatus);
+    }
+
+    return best_worker;
+}
+
 /*
  * CONNECTION related...
  */