module AP_MODULE_DECLARE_DATA proxy_balancer_module;
-#if APR_HAS_THREADS1
+#if APR_HAS_THREADS
#define PROXY_BALANCER_LOCK(b) apr_thread_mutex_lock((b)->mutex)
#define PROXY_BALANCER_UNLOCK(b) apr_thread_mutex_unlock((b)->mutex)
#else
return NULL;
}
+static proxy_runtime_worker *find_best_worker(proxy_balancer *balancer,
+ request_rec *r)
+{
+ int i;
+ double total_status = 0.0;
+ proxy_runtime_worker *worker = (proxy_runtime_worker *)balancer->workers->elts;
+ proxy_runtime_worker *candidate = NULL;
+
+ /* First try to see if we have available candidate */
+ for (i = 0; i < balancer->workers->nelts; i++) {
+ /* If the worker is not error state
+ * or not in disabled mode
+ */
+
+ /* TODO: read the scoreboard status */
+ if (worker->w->status < 2) {
+ if (!candidate)
+ candidate = worker;
+ else {
+ /* See if the worker has a larger number of free channels */
+ if (worker->w->cp->nfree > candidate->w->cp->nfree)
+ candidate = worker;
+ }
+ total_status += worker->lbstatus;
+ }
+ worker++;
+ }
+ if (!candidate) {
+ /* All the workers are in error state or disabled.
+ * If the balancer has a timeout wait.
+ */
+#if APR_HAS_THREADS
+ if (balancer->timeout) {
+ /* XXX: This can perhaps be build using some
+ * smarter mechanism, like tread_cond.
+ * But since the statuses can came from
+ * different childs, use the provided algo.
+ */
+ apr_interval_time_t timeout = balancer->timeout;
+ apr_interval_time_t step, tval = 0;
+ balancer->timeout = 0;
+ step = timeout / 100;
+ while (tval < timeout) {
+ apr_sleep(step);
+ /* Try again */
+ if ((candidate = find_best_worker(balancer, r)))
+ break;
+ tval += step;
+ }
+ /* restore the timeout */
+ balancer->timeout = timeout;
+ }
+#endif
+ }
+ else {
+ /* We have at least one candidate that is not in
+ * error state or disabled.
+ * Now calculate the appropriate one
+ */
+ for (i = 0; i < balancer->workers->nelts; i++) {
+ /* If the worker is not error state
+ * or not in disabled mode
+ */
+ if (worker->w->status > 2) {
+ /* 1. Find the worker with higher lbstatus.
+ * Lbstatus is of higher importance then
+ * the number of empty slots.
+ */
+ if (worker->lbstatus > candidate->lbstatus) {
+ candidate = worker;
+ }
+ }
+ worker++;
+ }
+ /* XXX: The lbfactor can be update using bytes transfered
+ * Right now, use the round-robin scheme
+ */
+ candidate->lbstatus += candidate->lbfactor;
+ if (candidate->lbstatus >= total_status)
+ candidate->lbstatus = candidate->lbfactor;
+ }
+ return candidate;
+}
+
/* Pre-request hook: map the URL onto a balancer and select a worker.
 * NOTE(review): only diff-hunk fragments of this function are visible
 * here; lines between hunks are elided from this view.
 */
static int proxy_balancer_pre_request(proxy_worker **worker,
                                      proxy_balancer **balancer,
                                      request_rec *r,
    char *route;
    apr_status_t rv;
    /* No worker chosen yet; set either from the sticky route below
     * or by find_best_worker(). */
    *worker = NULL;
    /* Step 1: check if the url is for us */
    if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url)))
        return DECLINED;
        return HTTP_SERVICE_UNAVAILABLE;
    }
    }
    else {
        /* We have a sticky load balancer */
        *worker = runtime->w;
    }
    /* Lock the LoadBalancer
     * XXX: perhaps we need the process lock here
     */
        "proxy_balancer_pre_request: lock");
        return DECLINED;
    }
    /* No sticky session matched: pick the best available worker. */
    if (!*worker) {
        runtime = find_best_worker(*balancer, r);
        if (!runtime) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                "balancer: (%s). All workers in error state.",
                (*balancer)->name);

            /* Release the lock on every early-exit path. */
            PROXY_BALANCER_UNLOCK(*balancer);
            return HTTP_SERVICE_UNAVAILABLE;
        }
        /* TODO: rewrite the url to coresponds to worker scheme */

        *worker = runtime->w;
    }
    /* Decrease the free channels number
     * (guarded so the unsigned count cannot wrap below zero). */
    if ((*worker)->cp->nfree)
        --(*worker)->cp->nfree;
    PROXY_BALANCER_UNLOCK(*balancer);
    /* Post-request bookkeeping: return the worker's channel to the pool.
     * NOTE(review): this is the tail of a function whose start is outside
     * this view. */
    if (!balancer)
        access_status = DECLINED;
    else {
        apr_status_t rv;
        /* Serialize updates to the balancer's shared counters. */
        if ((rv = PROXY_BALANCER_LOCK(balancer)) != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
                "proxy_balancer_post_request: lock");
            return DECLINED;
        }
        /* increase the free channels number
         * NOTE(review): this guard looks inverted -- once nfree reaches 0
         * it can never be incremented back, so the worker would stay
         * pinned at zero free channels.  Pre-request decrements with the
         * same guard, so nfree CAN hit 0; confirm whether this should
         * increment unconditionally (or be bounded by the pool size). */
        if (worker->cp->nfree)
            worker->cp->nfree++;
        /* TODO: calculate the bytes transfered */

        /* TODO: update the scoreboard status */
        PROXY_BALANCER_UNLOCK(balancer);
        access_status = OK;
    }