arr = apr_table_elts(params);
elts = (const apr_table_entry_t *)arr->elts;
/* Distinguish the balancer from worker */
- if (ap_proxy_valid_balancer_name(r)) {
+ if (ap_proxy_valid_balancer_name(r, 9)) {
proxy_balancer *balancer = ap_proxy_get_balancer(cmd->pool, conf, r);
if (!balancer) {
const char *err = ap_proxy_define_balancer(cmd->pool, &balancer, conf, r);
name = ap_getword_conf(cmd->temp_pool, &arg);
}
- if (ap_proxy_valid_balancer_name(name)) {
+ if (ap_proxy_valid_balancer_name(name, 9)) {
balancer = ap_proxy_get_balancer(cmd->pool, conf, name);
if (!balancer) {
if (in_proxy_section) {
return apr_pstrcat(cmd->pool, thiscmd->name,
"> arguments are not supported for non url.",
NULL);
- if (ap_proxy_valid_balancer_name((char*)conf->p)) {
+ if (ap_proxy_valid_balancer_name((char *)conf->p, 9)) {
balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
if (!balancer) {
err = ap_proxy_define_balancer(cmd->pool, &balancer,
void *sconf = s->module_config;
proxy_server_conf *conf;
proxy_worker *worker;
+ int i;
conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
/*
* NOTE: non-balancer members don't use shm at all...
* after all, why should they?
*/
+ worker = (proxy_worker *)conf->workers->elts;
+ for (i = 0; i < conf->workers->nelts; i++, worker++) {
+ ap_proxy_initialize_worker(worker, s, conf->pool);
+ }
/* Create and initialize forward worker if defined */
if (conf->req_set && conf->req) {
- ap_proxy_define_worker(p, &worker, NULL, NULL, "http://www.apache.org");
- conf->forward = worker;
+ proxy_worker *forward;
+ ap_proxy_define_worker(p, &forward, NULL, NULL, "http://www.apache.org");
+ conf->forward = forward;
PROXY_STRNCPY(conf->forward->s->name, "proxy:forward");
PROXY_STRNCPY(conf->forward->s->hostname, "*");
PROXY_STRNCPY(conf->forward->s->scheme, "*");
conf->forward->s->status |= PROXY_WORKER_IGNORE_ERRORS;
/* Disable address cache for generic forward worker */
conf->forward->s->is_address_reusable = 0;
+ ap_proxy_initialize_worker(conf->forward, s, conf->pool);
}
if (!reverse) {
ap_proxy_define_worker(p, &reverse, NULL, NULL, "http://www.apache.org");
reverse->s->is_address_reusable = 0;
}
conf->reverse = reverse;
+ ap_proxy_initialize_worker(conf->reverse, s, conf->pool);
s = s->next;
}
}
#include "http_connection.h"
#include "util_filter.h"
#include "util_ebcdic.h"
+#include "util_md5.h"
#include "ap_provider.h"
#include "ap_slotmem.h"
status_full
} proxy_status; /* Status display options */
apr_sockaddr_t *source_address;
- apr_global_mutex_t *mutex; /* global lock for updating lb params */
+ apr_global_mutex_t *mutex; /* global lock (needed??) */
int req_set:1;
int viaopt_set:1;
proxy_worker_shared *s; /* Shared data */
proxy_balancer *balancer; /* which balancer am I in? */
apr_thread_mutex_t *mutex; /* Thread lock for updating address cache */
+ int local_status; /* status of per-process worker */
void *context; /* general purpose storage */
};
int growth; /* number of post-config workers can added */
int max_workers; /* maximum number of allowed workers */
const char *name; /* name of the load balancer */
+ const char *sname; /* filesystem safe balancer name */
apr_interval_time_t timeout; /* Timeout for waiting on free connection */
const char *lbprovider; /* name of the lbmethod provider to use */
proxy_balancer_method *lbmethod;
int max_attempts_set:1;
void *context; /* general purpose storage */
apr_time_t updated; /* timestamp of last update */
+ apr_global_mutex_t *mutex; /* global lock for updating lb params */
};
struct proxy_balancer_method {
/**
* Verifies valid balancer name (eg: balancer://foo)
* @param name name to test
- * @return ptr to start of name or NULL if not valid
+ * @param i number of leading chars to test; 0 tests the full BALANCER_PREFIX.
+ * @return true/false
*/
-PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(char *name);
+PROXY_DECLARE(int) ap_proxy_valid_balancer_name(char *name, int i);
/**
const char *err;
apr_port_t port = 0;
+ /* TODO: offset of BALANCER_PREFIX ?? */
if (strncasecmp(url, "balancer:", 9) == 0) {
url += 9;
}
if (path == NULL)
return HTTP_BAD_REQUEST;
- r->filename = apr_pstrcat(r->pool, "proxy:balancer://", host,
+ r->filename = apr_pstrcat(r->pool, "proxy:", BALANCER_PREFIX, host,
"/", path, (search) ? "?" : "", (search) ? search : "", NULL);
r->path_info = apr_pstrcat(r->pool, "/", path, NULL);
for (i = 0; i < balancer->workers->nelts; i++) {
int worker_is_initialized;
proxy_worker *worker = *workers;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "Looking at %s -> %s initialized?", balancer->name, worker->s->name);
worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker);
if (!worker_is_initialized) {
ap_proxy_initialize_worker(worker, s, conf->pool);
{
proxy_worker *candidate = NULL;
apr_status_t rv;
- proxy_server_conf *conf = (proxy_server_conf *)
- ap_get_module_config(r->server->module_config, &proxy_module);
- if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_LOCK(balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Lock failed for find_best_worker()", balancer->name);
return NULL;
return NULL;
*/
- if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for find_best_worker()", balancer->name);
}
/* Step 2: Lock the LoadBalancer
* XXX: perhaps we need the process lock here
*/
- if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_LOCK(*balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Lock failed for pre_request",
(*balancer)->name);
ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
"proxy: BALANCER: (%s). All workers are in error state for route (%s)",
(*balancer)->name, route);
- if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for pre_request",
(*balancer)->name);
}
}
- if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(*balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for pre_request",
(*balancer)->name);
apr_status_t rv;
- if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_LOCK(balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Lock failed for post_request",
balancer->name);
}
}
- if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(balancer)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for post_request",
balancer->name);
static apr_status_t lock_remove(void *data)
{
+ int i;
+ proxy_balancer *balancer;
server_rec *s = data;
void *sconf = s->module_config;
proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
- if (conf->mutex) {
- apr_global_mutex_destroy(conf->mutex);
- conf->mutex = NULL;
+
+ /* Pool cleanup: each balancer now owns its own global mutex (instead
+  * of the old single server-wide conf->mutex), so walk the balancer
+  * array and destroy every mutex that was created.  NULLing the
+  * pointer makes a repeated cleanup invocation a harmless no-op. */
+ balancer = (proxy_balancer *)conf->balancers->elts;
+ for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+ if (balancer->mutex) {
+ apr_global_mutex_destroy(balancer->mutex);
+ balancer->mutex = NULL;
+ }
}
return(0);
}
{
apr_uuid_t uuid;
void *data;
- apr_status_t rv;
void *sconf = s->module_config;
proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
const char *userdata_key = "mod_proxy_balancer_init";
* the process. */
apr_uuid_get(&uuid);
apr_uuid_format(balancer_nonce, &uuid);
-
- /* Create global mutex */
- rv = ap_global_mutex_create(&conf->mutex, NULL, balancer_mutex_type, NULL,
- s, pconf, 0);
- if (rv != APR_SUCCESS || !conf->mutex) {
- ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
- "mutex creation of %s failed", balancer_mutex_type);
- return HTTP_INTERNAL_SERVER_ERROR;
- }
-
- apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
- apr_pool_cleanup_null);
-
+
/*
* Get worker slotmem setup
while (s) {
int i,j;
apr_status_t rv;
+ proxy_balancer *balancer;
sconf = s->module_config;
conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
/* Initialize shared scoreboard data */
- proxy_balancer *balancer = (proxy_balancer *)conf->balancers->elts;
+ balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
proxy_worker **workers;
proxy_worker *worker;
ap_slotmem_instance_t *new = NULL;
balancer->max_workers = balancer->workers->nelts + balancer->growth;
-
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing create: %s, %d, %d",
- balancer->name, (int)sizeof(proxy_worker_shared),
+ balancer->sname = ap_md5(pconf, (const unsigned char *)balancer->name);
+
+ /* Create global mutex */
+ rv = ap_global_mutex_create(&(balancer->mutex), NULL, balancer_mutex_type,
+ balancer->sname, s, pconf, 0);
+ if (rv != APR_SUCCESS || !balancer->mutex) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
+ "mutex creation of %s : %s failed", balancer_mutex_type,
+ balancer->sname);
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+
+ apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
+ apr_pool_cleanup_null);
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing create: %s (%s), %d, %d",
+ balancer->name, balancer->sname,
+ (int)sizeof(proxy_worker_shared),
(int)balancer->max_workers);
- rv = storage->create(&new, balancer->name, sizeof(proxy_worker_shared),
+ rv = storage->create(&new, balancer->sname, sizeof(proxy_worker_shared),
balancer->max_workers, AP_SLOTMEM_TYPE_PREGRAB, pconf);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_create failed");
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_dptr failed");
return !OK;
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %x %pp %pp %pp %pp %d", worker->hash, worker->balancer, (char *)worker->context, worker->s, shm, (int)index);
if ((rv = ap_proxy_share_worker(worker, shm, index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "Cannot share worker");
return !OK;
if ((name = apr_table_get(params, "b")))
bsel = ap_proxy_get_balancer(r->pool, conf,
- apr_pstrcat(r->pool, "balancer://", name, NULL));
+ apr_pstrcat(r->pool, BALANCER_PREFIX, name, NULL));
if ((name = apr_table_get(params, "w"))) {
wsel = ap_proxy_get_worker(r->pool, bsel, conf, name);
}
char fbuf[50];
worker = *workers;
ap_rvputs(r, "<tr>\n<td><a href=\"", r->uri, "?b=",
- balancer->name + sizeof("balancer://") - 1, "&w=",
+ balancer->name + sizeof(BALANCER_PREFIX) - 1, "&w=",
ap_escape_uri(r->pool, worker->s->name),
"&nonce=", balancer_nonce,
"\">", NULL);
ap_rvputs(r, "</table>\n<input type=hidden name=\"w\" ", NULL);
ap_rvputs(r, "value=\"", ap_escape_uri(r->pool, wsel->s->name), "\">\n", NULL);
ap_rvputs(r, "<input type=hidden name=\"b\" ", NULL);
- ap_rvputs(r, "value=\"", bsel->name + sizeof("balancer://") - 1,
+ ap_rvputs(r, "value=\"", bsel->name + sizeof(BALANCER_PREFIX) - 1,
"\">\n", NULL);
ap_rvputs(r, "<input type=hidden name=\"nonce\" value=\"",
balancer_nonce, "\">\n", NULL);
void *sconf = s->module_config;
proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
apr_status_t rv;
-
- if (!conf->mutex) {
- ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
- "no mutex %s", balancer_mutex_type);
- return;
- }
- /* Re-open the mutex for the child. */
- rv = apr_global_mutex_child_init(&conf->mutex,
- apr_global_mutex_lockfile(conf->mutex),
- p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
- "Failed to reopen mutex %s in child",
- balancer_mutex_type);
- exit(1); /* Ugly, but what else? */
- }
-
- /* Initialize shared scoreboard data */
+
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++) {
apr_size_t size;
unsigned int num;
- storage->attach(&(balancer->slot), balancer->name, &size, &num, p);
+
+ /*
+ * for each balancer we need to init the global
+ * mutex and then attach to the shared worker shm
+ */
+ if (!balancer->mutex) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "no mutex %s: %s", balancer->name,
+ balancer_mutex_type);
+ return;
+ }
+
+ /* Re-open the mutex for the child. */
+ rv = apr_global_mutex_child_init(&(balancer->mutex),
+ apr_global_mutex_lockfile(balancer->mutex),
+ p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "Failed to reopen mutex %s: %s in child",
+ balancer->name, balancer_mutex_type);
+ exit(1); /* Ugly, but what else? */
+ }
+
+ /* now attach */
+ storage->attach(&(balancer->slot), balancer->sname, &size, &num, p);
if (!balancer->slot) {
ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_attach failed");
exit(1); /* Ugly, but what else? */
ap_get_module_config(r->server->module_config, &proxy_module);
proxy_balancer *balancer;
const char *real = ent[i].real;
- const char *bname;
/*
* First check if mapping against a balancer and see
* if we have such a entity. If so, then we need to
* or may not be the right one... basically, we need
* to find which member actually handled this request.
*/
- bname = ap_proxy_valid_balancer_name((char *)real);
- if (bname && (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
+ if (ap_proxy_valid_balancer_name((char *)real, 0) &&
+ (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
int n, l3 = 0;
proxy_worker **worker = (proxy_worker **)balancer->workers->elts;
- const char *urlpart = ap_strchr_c(bname, '/');
+ const char *urlpart = ap_strchr_c(real, '/');
if (urlpart) {
if (!urlpart[1])
urlpart = NULL;
*/
/*
- * verifies that the balancer name conforms to standards. If
- * so, returns ptr to the actual name (BALANCER_PREFIX removed),
- * otherwise NULL
+ * verifies that the balancer name conforms to standards.
*/
-PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(char *name)
+PROXY_DECLARE(int) ap_proxy_valid_balancer_name(char *name, int i)
{
- if (strncasecmp(name, BALANCER_PREFIX, sizeof(BALANCER_PREFIX)-1) == 0)
- return (name + sizeof(BALANCER_PREFIX)-1);
- else
- return NULL;
+ /* i == 0 means "match the whole BALANCER_PREFIX"; callers pass a
+  * shorter count (e.g. 9, for "balancer:") to accept scheme-only
+  * names without the trailing "//". */
+ if (!i)
+ i = sizeof(BALANCER_PREFIX)-1;
+ /* case-insensitive prefix compare: non-zero when name is valid */
+ return (!strncasecmp(name, BALANCER_PREFIX, i));
}
const char *url)
{
proxy_balancer *balancer;
- char *name, *q, *uri = apr_pstrdup(p, url);
+ char *c, *uri = apr_pstrdup(p, url);
int i;
-
- if (!(name = ap_proxy_valid_balancer_name(uri)))
- return NULL;
-
+
+ c = strchr(uri, ':');
+ if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
+ return NULL;
+ }
/* remove path from uri */
- if ((q = strchr(name, '/')))
- *q = '\0';
-
+ if ((c = strchr(c + 3, '/'))) {
+ *c = '\0';
+ }
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++) {
- if (strcasecmp(balancer->name, name) == 0) {
+ if (strcasecmp(balancer->name, uri) == 0) {
return balancer;
}
balancer++;
proxy_server_conf *conf,
const char *url)
{
- char *name, *q, *uri = apr_pstrdup(p, url);
+ char *c, *q, *uri = apr_pstrdup(p, url);
proxy_balancer_method *lbmethod;
/* We should never get here without a valid BALANCER_PREFIX... */
- name = ap_proxy_valid_balancer_name(uri);
- if (!name)
- return apr_pstrcat(p, "Bad syntax for a balancer name ", uri, NULL);
-
+ c = strchr(uri, ':');
+ if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0')
+ return "Bad syntax for a balancer name";
/* remove path from uri */
- if ((q = strchr(name, '/')))
+ if ((q = strchr(c + 3, '/')))
*q = '\0';
- ap_str_tolower(name);
+ ap_str_tolower(uri);
*balancer = apr_array_push(conf->balancers);
memset(*balancer, 0, sizeof(proxy_balancer));
return "Can't find 'byrequests' lb method";
}
- (*balancer)->name = name;
+ (*balancer)->name = uri;
(*balancer)->lbmethod = lbmethod;
(*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
(*balancer)->updated = apr_time_now();
+ (*balancer)->mutex = NULL;
return NULL;
}
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)
{
- apr_status_t rv;
+ apr_status_t rv = APR_SUCCESS;
int mpm_threads;
if (worker->s->status & PROXY_WORKER_INITIALIZED) {
/* The worker is already initialized */
- return APR_SUCCESS;
- }
-
- /* Set default parameters */
- if (!worker->s->retry_set) {
- worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
- }
- /* By default address is reusable unless DisableReuse is set */
- if (worker->s->disablereuse) {
- worker->s->is_address_reusable = 0;
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "worker %s shared already initialized", worker->s->name);
}
else {
- worker->s->is_address_reusable = 1;
- }
-
- if (worker->cp == NULL)
- init_conn_pool(p, worker);
- if (worker->cp == NULL) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "can not create connection pool");
- return APR_EGENERAL;
- }
-
- if (worker->mutex == NULL) {
- rv = apr_thread_mutex_create(&(worker->mutex), APR_THREAD_MUTEX_DEFAULT, p);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "can not create thread mutex");
- return rv;
+ "initializing worker %s shared", worker->s->name);
+ /* Set default parameters */
+ if (!worker->s->retry_set) {
+ worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
}
- }
-
- ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
- if (mpm_threads > 1) {
- /* Set hard max to no more then mpm_threads */
- if (worker->s->hmax == 0 || worker->s->hmax > mpm_threads) {
- worker->s->hmax = mpm_threads;
+ /* By default address is reusable unless DisableReuse is set */
+ if (worker->s->disablereuse) {
+ worker->s->is_address_reusable = 0;
}
- if (worker->s->smax == -1 || worker->s->smax > worker->s->hmax) {
- worker->s->smax = worker->s->hmax;
+ else {
+ worker->s->is_address_reusable = 1;
}
- /* Set min to be lower then smax */
- if (worker->s->min > worker->s->smax) {
- worker->s->min = worker->s->smax;
+
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
+ if (mpm_threads > 1) {
+ /* Set hard max to no more then mpm_threads */
+ if (worker->s->hmax == 0 || worker->s->hmax > mpm_threads) {
+ worker->s->hmax = mpm_threads;
+ }
+ if (worker->s->smax == -1 || worker->s->smax > worker->s->hmax) {
+ worker->s->smax = worker->s->hmax;
+ }
+ /* Set min to be lower then smax */
+ if (worker->s->min > worker->s->smax) {
+ worker->s->min = worker->s->smax;
+ }
+ }
+ else {
+ /* This will suppress the apr_reslist creation */
+ worker->s->min = worker->s->smax = worker->s->hmax = 0;
}
}
- else {
- /* This will supress the apr_reslist creation */
- worker->s->min = worker->s->smax = worker->s->hmax = 0;
- }
- if (worker->s->hmax) {
- rv = apr_reslist_create(&(worker->cp->res),
- worker->s->min, worker->s->smax,
- worker->s->hmax, worker->s->ttl,
- connection_constructor, connection_destructor,
- worker, worker->cp->pool);
-
- apr_pool_cleanup_register(worker->cp->pool, (void *)worker,
- conn_pool_cleanup,
- apr_pool_cleanup_null);
+ /* What if local is init'ed and shm isn't?? Even possible? */
+ if (worker->local_status & PROXY_WORKER_INITIALIZED) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "worker %s local already initialized", worker->s->name);
+ }
+ else {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "proxy: initialized worker in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
- getpid(), worker->s->hostname, worker->s->min,
- worker->s->hmax, worker->s->smax);
+ "initializing worker %s local", worker->s->name);
+ /* Now init local worker data */
+ if (worker->cp == NULL)
+ init_conn_pool(p, worker);
+ if (worker->cp == NULL) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "can not create connection pool");
+ return APR_EGENERAL;
+ }
- /* Set the acquire timeout */
- if (rv == APR_SUCCESS && worker->s->acquire_set) {
- apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
+ if (worker->mutex == NULL) {
+ rv = apr_thread_mutex_create(&(worker->mutex), APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "can not create thread mutex");
+ return rv;
+ }
}
- }
- else
- {
- void *conn;
+ if (worker->s->hmax) {
+ rv = apr_reslist_create(&(worker->cp->res),
+ worker->s->min, worker->s->smax,
+ worker->s->hmax, worker->s->ttl,
+ connection_constructor, connection_destructor,
+ worker, worker->cp->pool);
- rv = connection_constructor(&conn, worker, worker->cp->pool);
- worker->cp->conn = conn;
+ apr_pool_cleanup_register(worker->cp->pool, (void *)worker,
+ conn_pool_cleanup,
+ apr_pool_cleanup_null);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "proxy: initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
- getpid(), worker->s->hostname);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "proxy: initialized pool in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
+ getpid(), worker->s->hostname, worker->s->min,
+ worker->s->hmax, worker->s->smax);
+
+ /* Set the acquire timeout */
+ if (rv == APR_SUCCESS && worker->s->acquire_set) {
+ apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
+ }
+
+ }
+ else {
+ void *conn;
+
+ rv = connection_constructor(&conn, worker, worker->cp->pool);
+ worker->cp->conn = conn;
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+ "proxy: initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
+ getpid(), worker->s->hostname);
+ }
}
if (rv == APR_SUCCESS) {
worker->s->status |= (PROXY_WORKER_INITIALIZED);
+ worker->local_status |= (PROXY_WORKER_INITIALIZED);
}
return rv;
}
if (worker->s->hmax && worker->cp->res) {
rv = apr_reslist_acquire(worker->cp->res, (void **)conn);
}
- else
- {
+ else {
/* create the new connection if the previous was destroyed */
if (!worker->cp->conn) {
connection_constructor((void **)conn, worker, worker->cp->pool);
if (method == PROXY_HASHFUNC_APR) {
apr_ssize_t slen = strlen(str);
return apr_hashfunc_default(str, &slen);
- } else if (method == PROXY_HASHFUNC_FNV) {
+ }
+ else if (method == PROXY_HASHFUNC_FNV) {
/* FNV model */
unsigned int hash;
const unsigned int fnv_prime = 0x811C9DC5;
hash ^= (*str);
}
return hash;
- } else { /* method == PROXY_HASHFUNC_DEFAULT */
+ }
+ else { /* method == PROXY_HASHFUNC_DEFAULT */
/* SDBM model */
unsigned int hash;
for (hash = 0; *str; str++) {