const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
+
PROXY_COPY_CONF_PARAMS(worker, conf);
} else {
reuse = 1;
/* Try to find existing worker */
worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, name);
if (!worker) {
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+ "Defining worker '%s' for balancer '%s'",
+ name, balancer->name);
if ((err = ap_proxy_define_worker(cmd->pool, &worker, balancer, conf, name)) != NULL)
return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+ "Defined worker '%s' for balancer '%s'",
+ worker->s->name, balancer->name);
PROXY_COPY_CONF_PARAMS(worker, conf);
} else {
reuse = 1;
return apr_pstrcat(cmd->pool, thiscmd->name,
"> arguments are not supported for non url.",
NULL);
- if (ap_proxy_valid_balancer_name(conf->p)) {
+ if (ap_proxy_valid_balancer_name((char*)conf->p)) {
balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
if (!balancer) {
err = ap_proxy_define_balancer(cmd->pool, &balancer,
PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR )
/* NOTE: these check the shared status */
-#define PROXY_WORKER_IS_INITIALIZED(f) ( (f)->s && \
- ( (f)->s->status & PROXY_WORKER_INITIALIZED ) )
+#define PROXY_WORKER_IS_INITIALIZED(f) ( (f)->s->status & PROXY_WORKER_INITIALIZED )
-#define PROXY_WORKER_IS_STANDBY(f) ( (f)->s && \
- ( (f)->s->status & PROXY_WORKER_HOT_STANDBY ) )
+#define PROXY_WORKER_IS_STANDBY(f) ( (f)->s->status & PROXY_WORKER_HOT_STANDBY )
-#define PROXY_WORKER_IS_USABLE(f) ( (f)->s && \
- ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
+#define PROXY_WORKER_IS_USABLE(f) ( ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
PROXY_WORKER_IS_INITIALIZED(f) )
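+/* Note: the (f)->s NULL guards were dropped on the assumption that every
+ * worker now gets its shared struct allocated up front in
+ * ap_proxy_define_worker() (via the apr_palloc() of wstatus below), so
+ * (f)->s can no longer be NULL by the time these macros are evaluated. */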
/* default worker retry timeout in seconds */
proxy_conn_pool *cp; /* Connection pool to use */
proxy_worker_shared *s; /* Shared data */
proxy_balancer *balancer; /* which balancer am I in? */
- void *context; /* general purpose storage */
apr_thread_mutex_t *mutex; /* Thread lock for updating address cache */
+ void *context; /* general purpose storage */
};
/*
* @param worker worker to be shared
* @param shm location of shared info
* @param i index into shm
+ * @return APR_SUCCESS or error code
*/
-PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
+PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
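+/* Callers should now check the returned status; see the slotmem sharing
+ * loop in balancer_post_config() for an example of handling a failure. */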
/**
* Initialize the worker by setting up worker connection pool and mutex
static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
- ap_mutex_register(pconf, balancer_mutex_type, NULL, APR_LOCK_DEFAULT, 0);
+
+ apr_status_t rv;
+
+ rv = ap_mutex_register(pconf, balancer_mutex_type, NULL,
+ APR_LOCK_DEFAULT, 0);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
return OK;
}
proxy_balancer *balancer)
{
int i;
- proxy_worker *worker;
+ proxy_worker **workers;
- worker = (proxy_worker *)balancer->workers->elts;
+ workers = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++) {
int worker_is_initialized;
+ proxy_worker *worker = *workers;
worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker);
if (!worker_is_initialized) {
ap_proxy_initialize_worker(worker, s, conf->pool);
}
- ++worker;
+ ++workers;
}
/* Set default number of attempts to the number of
int checked_standby;
proxy_worker **workers;
- proxy_worker *worker;
checking_standby = checked_standby = 0;
while (!checked_standby) {
workers = (proxy_worker **)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++, workers++) {
- worker = *workers;
+ proxy_worker *worker = *workers;
if ( (checking_standby ? !PROXY_WORKER_IS_STANDBY(worker) : PROXY_WORKER_IS_STANDBY(worker)) )
continue;
if (*(worker->s->route) && strcmp(worker->s->route, route) == 0) {
}
}
+static apr_status_t lock_remove(void *data)
+{
+ server_rec *s = data;
+ void *sconf = s->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+ if (conf->mutex) {
+ apr_global_mutex_destroy(conf->mutex);
+ conf->mutex = NULL;
+ }
+ return APR_SUCCESS;
+}
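+/* lock_remove() is registered as a pconf cleanup in balancer_post_config()
+ * below, so the global mutex is destroyed on pool teardown instead of
+ * leaking across restarts. */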
+
/* post_config hook: */
static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
const char *userdata_key = "mod_proxy_balancer_init";
- /* balancer_init() will be called twice during startup. So, only
+ /* balancer_post_config() will be called twice during startup. So, don't
* set up the static data the 1st time through. */
apr_pool_userdata_get(&data, userdata_key, s->process->pool);
if (!data) {
apr_pool_userdata_set((const void *)1, userdata_key,
apr_pool_cleanup_null, s->process->pool);
+ return OK;
}
/* Retrieve a UUID and store the nonce for the lifetime of
* the process. */
/* Create global mutex */
rv = ap_global_mutex_create(&conf->mutex, NULL, balancer_mutex_type, NULL,
s, pconf, 0);
- if (rv != APR_SUCCESS) {
+ if (rv != APR_SUCCESS || !conf->mutex) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
+ "mutex creation of %s failed", balancer_mutex_type);
return HTTP_INTERNAL_SERVER_ERROR;
}
+ apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
+ apr_pool_cleanup_null);
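+ /* The matching apr_global_mutex_child_init() happens in the child-init
+  * hook further down, now guarded by a NULL check on conf->mutex. */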
+
/*
* Get worker slotmem setup
*/
/* Initialize shared scoreboard data */
proxy_balancer *balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
- apr_size_t size;
- unsigned int num;
+ proxy_worker **workers;
proxy_worker *worker;
ap_slotmem_instance_t *new = NULL;
return !OK;
}
balancer->slot = new;
-#if 0
- rv = storage->attach(&(balancer->slot), balancer->name, &size, &num, pconf);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_attach failed");
- return !OK;
- }
-#endif
- worker = (proxy_worker *)balancer->workers->elts;
- for (j = 0; j < balancer->workers->nelts; j++, worker++) {
+
+ workers = (proxy_worker **)balancer->workers->elts;
+ for (j = 0; j < balancer->workers->nelts; j++, workers++) {
proxy_worker_shared *shm;
unsigned int index;
+ worker = *workers;
if ((rv = storage->grab(balancer->slot, &index)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_grab failed");
return !OK;
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_dptr failed");
return !OK;
}
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %pp %pp %d", worker->s, shm, (int)index);
- ap_proxy_share_worker(worker, shm, index);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
+              "Doing share: %x %pp %pp %pp %pp %d",
+              worker->hash, worker->balancer, (char *)worker->context,
+              worker->s, shm, (int)index);
+ if ((rv = ap_proxy_share_worker(worker, shm, index)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "Cannot share worker");
+ return !OK;
+ }
}
}
s = s->next;
"</httpd:scheme>\n", NULL);
ap_rvputs(r, " <httpd:hostname>", worker->s->hostname,
"</httpd:hostname>\n", NULL);
- ap_rprintf(r, " <httpd:loadfactor>%d</httpd:loadfactor>\n",
+ ap_rprintf(r, " <httpd:loadfactor>%d</httpd:loadfactor>\n",
worker->s->lbfactor);
ap_rputs(" </httpd:worker>\n", r);
++workers;
proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
apr_status_t rv;
+ if (!conf->mutex) {
+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+ "no mutex %s", balancer_mutex_type);
+ return;
+ }
/* Re-open the mutex for the child. */
rv = apr_global_mutex_child_init(&conf->mutex,
apr_global_mutex_lockfile(conf->mutex),
int worker_name_length;
const char *c;
char *url_copy;
- int i, end;
+ int i;
c = ap_strchr_c(url, ':');
if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
ap_str_tolower(url_copy);
min_match = strlen(url_copy);
}
-
- if (balancer) {
- worker = (proxy_worker *)balancer->workers->elts;
- end = balancer->workers->nelts;
- } else {
- worker = (proxy_worker *)conf->workers->elts;
- end = conf->workers->nelts;
- }
-
/*
* Do a "longest match" on the worker name to find the worker that
* fits best to the URL, but keep in mind that we must have at least
* a minimum matching of length min_match such that
* scheme://hostname[:port] matches between worker and url.
*/
- for (i = 0; i < end; i++) {
- if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
- && (worker_name_length >= min_match)
- && (worker_name_length > max_match)
- && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
- max_worker = worker;
- max_match = worker_name_length;
+
+ if (balancer) {
+ proxy_worker **workers = (proxy_worker **)balancer->workers->elts;
+ for (i = 0; i < balancer->workers->nelts; i++, workers++) {
+ worker = *workers;
+ if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+ && (worker_name_length >= min_match)
+ && (worker_name_length > max_match)
+ && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+ max_worker = worker;
+ max_match = worker_name_length;
+ }
+
+ }
+ } else {
+ worker = (proxy_worker *)conf->workers->elts;
+ for (i = 0; i < conf->workers->nelts; i++, worker++) {
+ if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+ && (worker_name_length >= min_match)
+ && (worker_name_length > max_match)
+ && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+ max_worker = worker;
+ max_match = worker_name_length;
+ }
}
- worker++;
}
+
return max_worker;
}
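+/* Hypothetical example of the longest-match rule above: with workers
+ * "http://backend/" and "http://backend/app" defined, a request for
+ * "http://backend/app/x" picks "http://backend/app", the longest worker
+ * name that still prefixes the URL while meeting min_match. */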
*
* in which case the worker goes in the conf slot.
*/
- if (balancer)
- *worker = apr_array_push(balancer->workers);
- else if (conf)
+ if (balancer) {
+ proxy_worker **runtime;
+ /* recall that we get a ptr to the ptr here */
+ runtime = apr_array_push(balancer->workers);
+ *worker = *runtime = apr_palloc(p, sizeof(proxy_worker)); /* assignment chains right to left */
+ } else if (conf) {
*worker = apr_array_push(conf->workers);
- else {
- proxy_worker *w = apr_palloc(p, sizeof(proxy_worker));
- *worker = w;
+ } else {
+ /* we need to allocate space here */
+ *worker = apr_palloc(p, sizeof(proxy_worker));
}
memset(*worker, 0, sizeof(proxy_worker));
/* right here we just want to tuck away the worker info.
* if called during config, we don't have shm setup yet,
* so just note the info for later. */
+#if 0
wstatus = malloc(sizeof(proxy_worker_shared)); /* will be freed in ap_proxy_share_worker */
+#else
+ wstatus = apr_palloc(p, sizeof(proxy_worker_shared));
+#endif
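+ /* wstatus now lives in the config pool, so the matching free() in
+  * ap_proxy_share_worker() is disabled the same way (see below) and the
+  * old comment about freeing no longer applies. */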
memset(wstatus, 0, sizeof(proxy_worker_shared));
wstatus->hash = ap_proxy_hashfunc(wstatus->name, PROXY_HASHFUNC_DEFAULT);
(*worker)->hash = wstatus->hash;
+ (*worker)->context = NULL;
(*worker)->cp = NULL;
(*worker)->mutex = NULL;
(*worker)->balancer = balancer;
-
(*worker)->s = wstatus;

return NULL;
}
/*
 * Share an already defined worker: copy its (up to now private) data
 * into the shm segment
 */
-PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
+PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
{
+ if (!shm || !worker->s)
+ return APR_EINVAL;
+
memcpy(shm, worker->s, sizeof(proxy_worker_shared));
+#if 0
free(worker->s); /* was malloced in ap_proxy_define_worker */
+#endif
worker->s = shm;
worker->s->index = i;
+ return APR_SUCCESS;
}
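+/* Sketch of the intended call pattern (mirrors balancer_post_config() above;
+ * balancer->slot and index come from the slotmem provider):
+ *
+ *     rv = storage->dptr(balancer->slot, index, (void *)&shm);
+ *     if (rv == APR_SUCCESS
+ *         && (rv = ap_proxy_share_worker(worker, shm, index)) == APR_SUCCESS) {
+ *         ...worker->s now points into the shared segment...
+ *     }
+ */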
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)