From: William A. Rowe Jr
Date: Wed, 11 Aug 2004 22:58:03 +0000 (+0000)
Subject: Add finding of a most suitable worker.
X-Git-Tag: post_ajp_proxy~18
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=392b0fc5fc803f19d87d057c437536b935affc11;p=apache

Add finding of a most suitable worker.

Submitted by: mturk

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@104612 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/modules/proxy/proxy_balancer.c b/modules/proxy/proxy_balancer.c
index 9d6d367f3e..b83da582c5 100644
--- a/modules/proxy/proxy_balancer.c
+++ b/modules/proxy/proxy_balancer.c
@@ -23,7 +23,7 @@
 
 module AP_MODULE_DECLARE_DATA proxy_balancer_module;
 
-#if APR_HAS_THREADS1
+#if APR_HAS_THREADS
 #define PROXY_BALANCER_LOCK(b)      apr_thread_mutex_lock((b)->mutex)
 #define PROXY_BALANCER_UNLOCK(b)    apr_thread_mutex_unlock((b)->mutex)
 #else
@@ -130,6 +130,90 @@ static proxy_runtime_worker *find_session_route(proxy_balancer *balancer,
     return NULL;
 }
 
+static proxy_runtime_worker *find_best_worker(proxy_balancer *balancer,
+                                              request_rec *r)
+{
+    int i;
+    double total_status = 0.0;
+    proxy_runtime_worker *worker = (proxy_runtime_worker *)balancer->workers->elts;
+    proxy_runtime_worker *candidate = NULL;
+
+    /* First try to see if we have available candidate */
+    for (i = 0; i < balancer->workers->nelts; i++) {
+        /* If the worker is not error state
+         * or not in disabled mode
+         */
+
+        /* TODO: read the scoreboard status */
+        if (worker->w->status < 2) {
+            if (!candidate)
+                candidate = worker;
+            else {
+                /* See if the worker has a larger number of free channels */
+                if (worker->w->cp->nfree > candidate->w->cp->nfree)
+                    candidate = worker;
+            }
+            total_status += worker->lbstatus;
+        }
+        worker++;
+    }
+    if (!candidate) {
+        /* All the workers are in error state or disabled.
+         * If the balancer has a timeout wait.
+         */
+#if APR_HAS_THREADS
+        if (balancer->timeout) {
+            /* XXX: This can perhaps be build using some
+             * smarter mechanism, like tread_cond.
+             * But since the statuses can came from
+             * different childs, use the provided algo.
+             */
+            apr_interval_time_t timeout = balancer->timeout;
+            apr_interval_time_t step, tval = 0;
+            balancer->timeout = 0;
+            step = timeout / 100;
+            while (tval < timeout) {
+                apr_sleep(step);
+                /* Try again */
+                if ((candidate = find_best_worker(balancer, r)))
+                    break;
+                tval += step;
+            }
+            /* restore the timeout */
+            balancer->timeout = timeout;
+        }
+#endif
+    }
+    else {
+        /* We have at least one candidate that is not in
+         * error state or disabled.
+         * Now calculate the appropriate one
+         */
+        for (i = 0; i < balancer->workers->nelts; i++) {
+            /* If the worker is not error state
+             * or not in disabled mode
+             */
+            if (worker->w->status > 2) {
+                /* 1. Find the worker with higher lbstatus.
+                 * Lbstatus is of higher importance then
+                 * the number of empty slots.
+                 */
+                if (worker->lbstatus > candidate->lbstatus) {
+                    candidate = worker;
+                }
+            }
+            worker++;
+        }
+        /* XXX: The lbfactor can be update using bytes transfered
+         * Right now, use the round-robin scheme
+         */
+        candidate->lbstatus += candidate->lbfactor;
+        if (candidate->lbstatus >= total_status)
+            candidate->lbstatus = candidate->lbfactor;
+    }
+    return candidate;
+}
+
 static int proxy_balancer_pre_request(proxy_worker **worker,
                                       proxy_balancer **balancer,
                                       request_rec *r,
@@ -140,6 +224,7 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
     char *route;
     apr_status_t rv;
 
+    *worker = NULL;
     /* Spet 1: check if the url is for us */
     if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url)))
         return DECLINED;
@@ -155,6 +240,10 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
             return HTTP_SERVICE_UNAVAILABLE;
         }
     }
+    else {
+        /* We have a sticky load balancer */
+        *worker = runtime->w;
+    }
     /* Lock the LoadBalancer
      * XXX: perhaps we need the process lock here
      */
@@ -163,6 +252,23 @@ static int proxy_balancer_pre_request(proxy_worker **worker,
                      "proxy_balancer_pre_request: lock");
         return DECLINED;
     }
+    if (!*worker) {
+        runtime = find_best_worker(*balancer, r);
+        if (!runtime) {
+            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
+                         "balancer: (%s). All workers in error state.",
+                         (*balancer)->name);
+
+            PROXY_BALANCER_UNLOCK(*balancer);
+            return HTTP_SERVICE_UNAVAILABLE;
+        }
+        /* TODO: rewrite the url to coresponds to worker scheme */
+
+        *worker = runtime->w;
+    }
+    /* Decrease the free channels number */
+    if ((*worker)->cp->nfree)
+        --(*worker)->cp->nfree;
 
     PROXY_BALANCER_UNLOCK(*balancer);
 
@@ -178,8 +284,20 @@ static int proxy_balancer_post_request(proxy_worker *worker,
     if (!balancer)
         access_status = DECLINED;
     else {
-
+        apr_status_t rv;
+        if ((rv = PROXY_BALANCER_LOCK(balancer)) != APR_SUCCESS) {
+            ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
+                         "proxy_balancer_post_request: lock");
+            return DECLINED;
+        }
+        /* increase the free channels number */
+        if (worker->cp->nfree)
+            worker->cp->nfree++;
+        /* TODO: calculate the bytes transfered */
+
+        /* TODO: update the scoreboard status */
+        PROXY_BALANCER_UNLOCK(balancer);
         access_status = OK;
     }
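
The selection logic added in find_best_worker() boils down to: skip workers that are in error or disabled state, remember the usable worker with the most free connection channels, sum the lbstatus of all usable workers, then prefer the usable worker with the highest lbstatus; the winner is credited with its lbfactor and wrapped back to lbfactor once it reaches the summed total. The following standalone C program is a sketch of that scheme only, not module code: the proxy_runtime_worker and connection-pool types are replaced by a hypothetical demo_worker struct, the two passes assume the same "usable" test (status < 2), and tie-breaking details are simplified.

    /* Standalone sketch of the lbstatus/lbfactor selection scheme above.
     * demo_worker is a hypothetical stand-in, not the Apache/APR type. */
    #include <stdio.h>

    struct demo_worker {
        const char *name;
        int         status;    /* < 2 taken to mean "not errored / not disabled" */
        int         nfree;     /* free connection-pool channels */
        double      lbfactor;  /* configured load-balancing weight */
        double      lbstatus;  /* running counter updated on every selection */
    };

    static struct demo_worker *select_worker(struct demo_worker *w, int n)
    {
        struct demo_worker *candidate = NULL;
        double total_status = 0.0;
        int i;

        /* Pass 1: pick any usable worker, preferring more free channels,
         * and sum the lbstatus of all usable workers. */
        for (i = 0; i < n; i++) {
            if (w[i].status < 2) {
                if (!candidate || w[i].nfree > candidate->nfree)
                    candidate = &w[i];
                total_status += w[i].lbstatus;
            }
        }
        if (!candidate)
            return NULL;              /* every worker errored or disabled */

        /* Pass 2: prefer the usable worker with the highest lbstatus. */
        for (i = 0; i < n; i++) {
            if (w[i].status < 2 && w[i].lbstatus > candidate->lbstatus)
                candidate = &w[i];
        }

        /* Same bookkeeping as the patch: credit the winner with its
         * lbfactor and wrap once it reaches the summed lbstatus. */
        candidate->lbstatus += candidate->lbfactor;
        if (candidate->lbstatus >= total_status)
            candidate->lbstatus = candidate->lbfactor;

        return candidate;
    }

    int main(void)
    {
        struct demo_worker workers[] = {
            { "node-a", 0, 4, 1.0, 0.0 },
            { "node-b", 0, 4, 2.0, 0.0 },
            { "node-c", 9, 4, 1.0, 0.0 },  /* error state: never selected */
        };
        int i;

        /* Drive a few selections and print how lbstatus evolves. */
        for (i = 0; i < 6; i++) {
            struct demo_worker *pick = select_worker(workers, 3);
            printf("request %d -> %s (lbstatus %.1f)\n",
                   i, pick ? pick->name : "(none)", pick ? pick->lbstatus : 0.0);
        }
        return 0;
    }

Compiling and running it (e.g. cc demo.c && ./a.out) simply shows which stand-in worker each simulated request would be routed to and how its lbstatus moves under the wrap-around rule; it makes no claim about the distribution the real module achieves.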
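
The pre_request/post_request hunks bracket each proxied request with the balancer mutex: a free channel is reserved (nfree decremented) before the request is forwarded and handed back in the post-request hook. Below is a minimal sketch of that reserve/release pattern, assuming a plain pthread mutex and a bare int counter in place of APR's apr_thread_mutex_t and the worker's connection pool; it is an illustration, not the module code.

    /* Sketch only: pthreads stand in for APR's mutex, an int for the
     * connection pool's free-channel count. */
    #include <pthread.h>

    struct demo_channel_pool {
        pthread_mutex_t mutex;   /* plays the role of balancer->mutex */
        int             nfree;   /* plays the role of worker->cp->nfree */
    };

    /* Before forwarding the request: reserve a channel. */
    static int demo_pre_request(struct demo_channel_pool *p)
    {
        if (pthread_mutex_lock(&p->mutex) != 0)
            return -1;               /* corresponds to the DECLINED path */
        if (p->nfree)
            --p->nfree;
        pthread_mutex_unlock(&p->mutex);
        return 0;
    }

    /* After the response: hand the channel back.
     * (The hunk above additionally guards the increment with a
     * non-zero check on worker->cp->nfree.) */
    static int demo_post_request(struct demo_channel_pool *p)
    {
        if (pthread_mutex_lock(&p->mutex) != 0)
            return -1;
        p->nfree++;
        pthread_mutex_unlock(&p->mutex);
        return 0;
    }

    int main(void)
    {
        struct demo_channel_pool pool = { PTHREAD_MUTEX_INITIALIZER, 4 };

        demo_pre_request(&pool);    /* 3 channels free while the request runs */
        demo_post_request(&pool);   /* back to 4 afterwards */
        return 0;
    }

In the module itself the counter lives in the chosen worker's connection pool (worker->cp->nfree) and the lock is the per-balancer mutex taken through PROXY_BALANCER_LOCK/PROXY_BALANCER_UNLOCK.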