mycandidate->s->lbstatus -= total_factor;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: bybusyness selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
- mycandidate->name, mycandidate->s->busy, mycandidate->s->lbstatus);
+ mycandidate->s->name, mycandidate->s->busy, mycandidate->s->lbstatus);
}
mycandidate->s->lbstatus -= total_factor;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: byrequests selected worker \"%s\" : busy %" APR_SIZE_T_FMT " : lbstatus %d",
- mycandidate->name, mycandidate->s->busy, mycandidate->s->lbstatus);
+ mycandidate->s->name, mycandidate->s->busy, mycandidate->s->lbstatus);
}
if (mycandidate) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: bytraffic selected worker \"%s\" : busy %" APR_SIZE_T_FMT,
- mycandidate->name, mycandidate->s->busy);
+ mycandidate->s->name, mycandidate->s->busy);
}
for (i = 0; i < balancer->workers->nelts; i++) {
worker = &APR_ARRAY_IDX(balancer->workers, i, proxy_worker *);
- server = apr_hash_get(servers, (*worker)->hostname, APR_HASH_KEY_STRING);
+ server = apr_hash_get(servers, (*worker)->s->hostname, APR_HASH_KEY_STRING);
if (!server) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
- "lb_heartbeat: No server for worker %s", (*worker)->name);
+ "lb_heartbeat: No server for worker %s", (*worker)->s->name);
continue;
}
#define MAX(x,y) ((x) >= (y) ? (x) : (y))
#endif
-/* return the sizeof of one lb_worker in scoreboard. */
-static int ap_proxy_lb_worker_size(void)
-{
- return sizeof(proxy_worker_shared);
-}
-
/*
* A Web proxy module. Stages:
*
/* Initialise worker if needed, note the shared area must be initialized by the balancer logic */
if (balancer) {
ap_proxy_initialize_worker(worker, r->server, conf->pool);
- ap_proxy_initialize_worker_share(conf, worker, r->server);
}
if (balancer && balancer->max_attempts_set && !max_attempts)
if (ap_proxy_valid_balancer_name(r)) {
proxy_balancer *balancer = ap_proxy_get_balancer(cmd->pool, conf, r);
if (!balancer) {
- const char *err = ap_proxy_alloc_balancer(&balancer,
- cmd->pool,
- conf, r);
+ const char *err = ap_proxy_define_balancer(cmd->pool, &balancer, conf, r);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
}
}
}
else {
- proxy_worker *worker = ap_proxy_get_worker(cmd->temp_pool, conf, r);
+ proxy_worker *worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, r);
int reuse = 0;
if (!worker) {
- const char *err = ap_proxy_add_worker(&worker, cmd->pool, conf, r);
+ const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
PROXY_COPY_CONF_PARAMS(worker, conf);
reuse = 1;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, cmd->server,
"Sharing worker '%s' instead of creating new worker '%s'",
- worker->name, new->real);
+ worker->s->name, new->real);
}
for (i = 0; i < arr->nelts; i++) {
if (reuse) {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
"Ignoring parameter '%s=%s' for worker '%s' because of worker sharing",
- elts[i].key, elts[i].val, worker->name);
+ elts[i].key, elts[i].val, worker->s->name);
} else {
const char *err = set_worker_param(cmd->pool, worker, elts[i].key,
elts[i].val);
const apr_table_entry_t *elts;
int reuse = 0;
int i;
+ const char *err;
if (cmd->path)
path = apr_pstrdup(cmd->pool, cmd->path);
ap_str_tolower(path); /* lowercase scheme://hostname */
+ /* Try to find the balancer */
+ balancer = ap_proxy_get_balancer(cmd->temp_pool, conf, path);
+ if (!balancer) {
+ err = ap_proxy_define_balancer(cmd->pool, &balancer, conf, path);
+ if (err)
+ return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
+ }
+
/* Try to find existing worker */
- worker = ap_proxy_get_worker(cmd->temp_pool, conf, name);
+ worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, name);
if (!worker) {
- const char *err;
- if ((err = ap_proxy_add_worker(&worker, cmd->pool, conf, name)) != NULL)
+ if ((err = ap_proxy_define_worker(cmd->pool, &worker, balancer, conf, name)) != NULL)
return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
PROXY_COPY_CONF_PARAMS(worker, conf);
} else {
reuse = 1;
ap_log_error(APLOG_MARK, APLOG_INFO, 0, cmd->server,
"Sharing worker '%s' instead of creating new worker '%s'",
- worker->name, name);
+ worker->s->name, name);
}
arr = apr_table_elts(params);
if (reuse) {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server,
"Ignoring parameter '%s=%s' for worker '%s' because of worker sharing",
- elts[i].key, elts[i].val, worker->name);
+ elts[i].key, elts[i].val, worker->s->name);
} else {
- const char *err = set_worker_param(cmd->pool, worker, elts[i].key,
+ err = set_worker_param(cmd->pool, worker, elts[i].key,
elts[i].val);
if (err)
return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
}
}
- /* Try to find the balancer */
- balancer = ap_proxy_get_balancer(cmd->temp_pool, conf, path);
- if (!balancer) {
- const char *err = ap_proxy_alloc_balancer(&balancer,
- cmd->pool,
- conf, path);
- if (err)
- return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
- }
- /* Add the worker to the load balancer */
- ap_proxy_add_worker_to_balancer(cmd->pool, balancer, worker);
+
return NULL;
}
name = ap_getword_conf(cmd->temp_pool, &arg);
}
- if (ap_proxy_valid_balancer_name(name) {
+ if (ap_proxy_valid_balancer_name(name)) {
balancer = ap_proxy_get_balancer(cmd->pool, conf, name);
if (!balancer) {
if (in_proxy_section) {
- err = ap_proxy_alloc_balancer(&balancer,
- cmd->pool,
- conf, name);
+ err = ap_proxy_define_balancer(cmd->pool, &balancer, conf, name);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxySet ",
err, NULL);
}
}
else {
- worker = ap_proxy_get_worker(cmd->temp_pool, conf, name);
+ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, name);
if (!worker) {
if (in_proxy_section) {
- err = ap_proxy_add_worker(&worker, cmd->pool,
- conf, name);
+ err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
+ conf, name);
if (err)
return apr_pstrcat(cmd->temp_pool, "ProxySet ",
err, NULL);
return apr_pstrcat(cmd->pool, thiscmd->name,
"> arguments are not supported for non url.",
NULL);
- if (ap_proxy_valid_balancer_name(conf->p) {
+ if (ap_proxy_valid_balancer_name(conf->p)) {
balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
if (!balancer) {
- err = ap_proxy_alloc_balancer(&balancer,
- cmd->pool,
- sconf, conf->p);
+ err = ap_proxy_define_balancer(cmd->pool, &balancer,
+ sconf, conf->p);
if (err)
return apr_pstrcat(cmd->temp_pool, thiscmd->name,
" ", err, NULL);
}
}
else {
- worker = ap_proxy_get_worker(cmd->temp_pool, sconf,
+ worker = ap_proxy_get_worker(cmd->temp_pool, NULL, sconf,
conf->p);
if (!worker) {
- err = ap_proxy_add_worker(&worker, cmd->pool,
+ err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
sconf, conf->p);
if (err)
return apr_pstrcat(cmd->temp_pool, thiscmd->name,
worker = (proxy_worker **)balancer->workers->elts;
for (n = 0; n < balancer->workers->nelts; n++) {
char fbuf[50];
- ap_rvputs(r, "<tr>\n<td>", (*worker)->scheme, "</td>", NULL);
- ap_rvputs(r, "<td>", (*worker)->hostname, "</td><td>", NULL);
+ ap_rvputs(r, "<tr>\n<td>", (*worker)->s->scheme, "</td>", NULL);
+ ap_rvputs(r, "<td>", (*worker)->s->hostname, "</td><td>", NULL);
if ((*worker)->s->status & PROXY_WORKER_DISABLED)
ap_rputs("Dis", r);
else if ((*worker)->s->status & PROXY_WORKER_IN_ERROR)
{
proxy_worker *reverse = NULL;
+ /* TODO */
while (s) {
void *sconf = s->module_config;
proxy_server_conf *conf;
proxy_worker *worker;
- int i;
conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
- /* Initialize worker's shared scoreboard data */
- worker = (proxy_worker *)conf->workers->elts;
- for (i = 0; i < conf->workers->nelts; i++) {
- ap_proxy_initialize_worker_share(conf, worker, s);
- ap_proxy_initialize_worker(worker, s, p);
- worker++;
- }
+ /*
+ * NOTE: workers that are not balancer members do not use shm at all;
+ * only balancer-managed workers need state shared across processes.
+ */
/* Create and initialize forward worker if defined */
if (conf->req_set && conf->req) {
- conf->forward = ap_proxy_create_worker(p);
- conf->forward->name = "proxy:forward";
- conf->forward->hostname = "*";
- conf->forward->scheme = "*";
- ap_proxy_initialize_worker_share(conf, conf->forward, s);
- ap_proxy_initialize_worker(conf->forward, s, p);
- /* Do not disable worker in case of errors */
+ ap_proxy_define_worker(p, &worker, NULL, NULL, "http://www.apache.org");
+ conf->forward = worker;
+ PROXY_STRNCPY(conf->forward->s->name, "proxy:forward");
+ PROXY_STRNCPY(conf->forward->s->hostname, "*");
+ PROXY_STRNCPY(conf->forward->s->scheme, "*");
+ conf->forward->hash = conf->forward->s->hash =
+ ap_proxy_hashfunc(conf->forward->s->name, PROXY_HASHFUNC_DEFAULT);
+ /* Do not disable worker in case of errors */
conf->forward->s->status |= PROXY_WORKER_IGNORE_ERRORS;
/* Disable address cache for generic forward worker */
- conf->forward->is_address_reusable = 0;
+ conf->forward->s->is_address_reusable = 0;
}
if (!reverse) {
- reverse = ap_proxy_create_worker(p);
- reverse->name = "proxy:reverse";
- reverse->hostname = "*";
- reverse->scheme = "*";
- ap_proxy_initialize_worker_share(conf, reverse, s);
- ap_proxy_initialize_worker(reverse, s, p);
+ ap_proxy_define_worker(p, &reverse, NULL, NULL, "http://www.apache.org");
+ PROXY_STRNCPY(reverse->s->name, "proxy:reverse");
+ PROXY_STRNCPY(reverse->s->hostname, "*");
+ PROXY_STRNCPY(reverse->s->scheme, "*");
+ reverse->hash = reverse->s->hash =
+ ap_proxy_hashfunc(reverse->s->name, PROXY_HASHFUNC_DEFAULT);
/* Do not disable worker in case of errors */
reverse->s->status |= PROXY_WORKER_IGNORE_ERRORS;
/* Disable address cache for generic reverse worker */
- reverse->is_address_reusable = 0;
+ reverse->s->is_address_reusable = 0;
}
conf->reverse = reverse;
s = s->next;
* make sure that we are called after the mpm
* initializes.
*/
- static const char *const aszPred[] = { "mpm_winnt.c", "mod_proxy_balancer.c", NULL};
+ static const char *const aszPred[] = { "mpm_winnt.c", "mod_proxy_balancer.c", "mod_slotmem_shm.c", NULL};
- APR_REGISTER_OPTIONAL_FN(ap_proxy_lb_workers);
- APR_REGISTER_OPTIONAL_FN(ap_proxy_lb_worker_size);
/* handler */
ap_hook_handler(proxy_handler, NULL, NULL, APR_HOOK_FIRST);
/* filename-to-URI translation */
#include "apr_reslist.h"
#define APR_WANT_STRFUNC
#include "apr_want.h"
+#include "util_mutex.h"
#include "apr_global_mutex.h"
+#include "apr_thread_mutex.h"
#include "httpd.h"
#include "http_config.h"
/**
* Get the worker from proxy configuration
* @param p memory pool used for finding worker
- * @param conf current proxy server configuration
* @param balancer the balancer that the worker belongs to
+ * @param conf current proxy server configuration
* @param url url to find the worker from
* @return proxy_worker or NULL if not found
*/
PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
- proxy_server_conf *conf,
proxy_balancer *balancer,
+ proxy_server_conf *conf,
const char *url);
/**
* Define and Allocate space for the worker to proxy configuration
* @param url url containing worker name
* @return error message or NULL if successful (*worker is new worker)
*/
-PROXY_DECLARE(const char *) ap_proxy_define_worker(apr_pool_t *p,
+PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
proxy_worker **worker,
proxy_balancer *balancer,
proxy_server_conf *conf,
const char *url);
/**
- * Create new worker
- * @param p memory pool to allocate worker from
- * @param id slotnumber id or -1 for auto allocation
- * @return new worker
+ * Share a defined proxy worker via shm
+ * @param worker worker to be shared
+ * @param shm location of shared info
+ * @param i index into shm
*/
-PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker_wid(apr_pool_t *p, int id);
+PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
/**
- * Create new worker
- * @param p memory pool to allocate worker from
- * @return new worker
- */
-PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker(apr_pool_t *p);
-
-/**
- * Initialize the worker's shared data
- * @param conf current proxy server configuration
- * @param worker worker to initialize
- * @param s current server record
- * @param worker worker to initialize
- */
-PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
- proxy_worker *worker,
- server_rec *s);
-
-/**
- * Initialize the worker
+ * Initialize the worker by setting up worker connection pool and mutex
* @param worker worker to initialize
* @param s current server record
* @param p memory pool used for mutex and connection pool
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker,
server_rec *s,
apr_pool_t *p);
+
/**
* Verifies valid balancer name (eg: balancer://foo)
* @param name name to test
* @return ptr to start of name or NULL if not valid
*/
-PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(const char *name);
+PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(char *name);
/**
* @param url url containing balancer name
* @return error message or NULL if successfull
*/
-PROXY_DECLARE(const char *) ap_proxy_define_balancer(apr_pool_t *p,
+PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
proxy_balancer **balancer,
proxy_server_conf *conf,
const char *url);
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: AJP: request failed to %pI (%s)",
conn->worker->cp->addr,
- conn->worker->hostname);
+ conn->worker->s->hostname);
if (status == AJP_EOVERFLOW)
return HTTP_BAD_REQUEST;
else {
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: send failed to %pI (%s)",
conn->worker->cp->addr,
- conn->worker->hostname);
+ conn->worker->s->hostname);
/*
* It is fatal when we failed to send a (part) of the request
* body.
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: read response failed from %pI (%s)",
conn->worker->cp->addr,
- conn->worker->hostname);
+ conn->worker->s->hostname);
/*
* This is only non fatal when we have not sent (parts) of a possible
* request body so far (we do not store it and thus cannot send it
r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(output_brigade, e);
- if ((conn->worker->flush_packets == flush_on) ||
- ((conn->worker->flush_packets == flush_auto) &&
+ if ((conn->worker->s->flush_packets == flush_on) ||
+ ((conn->worker->s->flush_packets == flush_auto) &&
((rv = apr_poll(conn_poll, 1, &conn_poll_fd,
- conn->worker->flush_wait))
+ conn->worker->s->flush_wait))
!= APR_SUCCESS) &&
APR_STATUS_IS_TIMEUP(rv))) {
e = apr_bucket_flush_create(r->connection->bucket_alloc);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: got response from %pI (%s)",
conn->worker->cp->addr,
- conn->worker->hostname);
+ conn->worker->s->hostname);
rv = OK;
}
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: dialog to %pI (%s) failed",
conn->worker->cp->addr,
- conn->worker->hostname);
+ conn->worker->s->hostname);
/*
* If we already send data, signal a broken backend connection
* upwards in the chain.
}
/* Handle CPING/CPONG */
- if (worker->ping_timeout_set) {
+ if (worker->s->ping_timeout_set) {
status = ajp_handle_cping_cpong(backend->sock, r,
- worker->ping_timeout);
+ worker->s->ping_timeout);
/*
* In case the CPING / CPONG failed for the first time we might be
* just out of luck and got a faulty backend connection, but the
ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
"proxy: AJP: cping/cpong failed to %pI (%s)",
worker->cp->addr,
- worker->hostname);
+ worker->s->hostname);
status = HTTP_SERVICE_UNAVAILABLE;
retry++;
continue;
return OK;
}
-static int init_balancer_members(proxy_server_conf *conf, server_rec *s,
+static void init_balancer_members(proxy_server_conf *conf, server_rec *s,
proxy_balancer *balancer)
{
int i;
- proxy_worker **workers;
+ proxy_worker *worker;
- workers = (proxy_worker **)balancer->workers->elts;
+ worker = (proxy_worker *)balancer->workers->elts;
for (i = 0; i < balancer->workers->nelts; i++) {
int worker_is_initialized;
- worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(*workers);
- if (!worker_is_initialized) {
- proxy_worker_shared *slot;
- /*
- * If the worker is not initialized check whether its scoreboard
- * slot is already initialized.
- */
- slot = (proxy_worker_shared *) XXXXXap_get_scoreboard_lb((*workers)->id);
- if (slot) {
- worker_is_initialized = slot->status & PROXY_WORKER_INITIALIZED;
- }
- else {
- worker_is_initialized = 0;
- }
- }
- ap_proxy_initialize_worker_share(conf, *workers, s);
- ap_proxy_initialize_worker(*workers, s, conf->pool);
+ worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker);
if (!worker_is_initialized) {
- /* Set to the original configuration */
- (*workers)->s->lbstatus = (*workers)->s->lbfactor =
- ((*workers)->lbfactor ? (*workers)->lbfactor : 1);
- (*workers)->s->lbset = (*workers)->lbset;
+ ap_proxy_initialize_worker(worker, s, conf->pool);
}
- ++workers;
+ ++worker;
}
/* Set default number of attempts to the number of
balancer->max_attempts = balancer->workers->nelts - 1;
balancer->max_attempts_set = 1;
}
- return 0;
}
/* Retrieve the parameter with the given name
NULL));
}
- *url = apr_pstrcat(r->pool, worker->name, path, NULL);
+ *url = apr_pstrcat(r->pool, worker->s->name, path, NULL);
return OK;
}
(*worker)->s->status &= ~PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: BALANCER: (%s). Forcing recovery for worker (%s)",
- balancer->name, (*worker)->hostname);
+ balancer->name, (*worker)->s->hostname);
}
}
}
apr_table_setn(r->subprocess_env,
"BALANCER_NAME", (*balancer)->name);
apr_table_setn(r->subprocess_env,
- "BALANCER_WORKER_NAME", (*worker)->name);
+ "BALANCER_WORKER_NAME", (*worker)->s->name);
apr_table_setn(r->subprocess_env,
"BALANCER_WORKER_ROUTE", (*worker)->s->route);
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: BALANCER (%s) worker (%s) rewritten to %s",
- (*balancer)->name, (*worker)->name, *url);
+ (*balancer)->name, (*worker)->s->name, *url);
return access_status;
}
if (r->status == val) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Forcing recovery for worker (%s), failonstatus %d",
- balancer->name, worker->name, val);
+ balancer->name, worker->s->name, val);
worker->s->status |= PROXY_WORKER_IN_ERROR;
worker->s->error_time = apr_time_now();
break;
*/
while (s) {
int i,j;
+ apr_status_t rv;
sconf = s->module_config;
conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
- proxy_worker *worker;
/* Initialize shared scoreboard data */
- balancer = (proxy_balancer *)conf->balancers->elts;
+ proxy_balancer *balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
+ apr_size_t size;
+ unsigned int num;
proxy_worker *worker;
+ ap_slotmem_instance_t *new = NULL;
balancer->max_workers = balancer->workers->nelts + balancer->growth;
- storage->create(&balancer->slot, balancer->name, sizeof(proxy_worker_shared),
+
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing create: %s, %d, %d",
+ balancer->name, (int)sizeof(proxy_worker_shared),
+ (int)balancer->max_workers);
+
+ rv = storage->create(&new, balancer->name, sizeof(proxy_worker_shared),
balancer->max_workers, AP_SLOTMEM_TYPE_PREGRAB, pconf);
- if (!balancer->slot) {
- ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_create failed");
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_create failed");
+ return !OK;
+ }
+ balancer->slot = new;
+#if 0
+ rv = storage->attach(&(balancer->slot), balancer->name, &size, &num, pconf);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_attach failed");
return !OK;
}
- proxy_worker *worker = balancer->workers->elts;
+#endif
+ worker = (proxy_worker *)balancer->workers->elts;
for (j = 0; j < balancer->workers->nelts; j++, worker++) {
proxy_worker_shared *shm;
unsigned int index;
- if ((storage->grab(balancer->slot, &index) != APR_SUCCESS) ||;
- (storage->dptr(balancer->slot, index, &shm) != APR_SUCESS)) {
- ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_grab/dptr failed");
+
+ if ((rv = storage->grab(balancer->slot, &index)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_grab failed");
+ return !OK;
+
+ }
+ if ((rv = storage->dptr(balancer->slot, index, (void *)&shm)) != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_dptr failed");
return !OK;
}
- ap_proxy_create_worker(worker, shm, index)
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %pp %pp %d", worker->s, shm, (int)index);
+ ap_proxy_share_worker(worker, shm, index);
}
}
s = s->next;
bsel = ap_proxy_get_balancer(r->pool, conf,
apr_pstrcat(r->pool, "balancer://", name, NULL));
if ((name = apr_table_get(params, "w"))) {
- proxy_worker *ws;
-
- ws = ap_proxy_get_worker(r->pool, conf, name);
- if (bsel && ws) {
- workers = (proxy_worker **)bsel->workers->elts;
- for (n = 0; n < bsel->workers->nelts; n++) {
- worker = *workers;
- if (strcasecmp(worker->name, ws->name) == 0) {
- wsel = worker;
- break;
- }
- ++workers;
- }
- }
+ wsel = ap_proxy_get_worker(r->pool, bsel, conf, name);
}
/* First set the params */
/*
}
}
if ((val = apr_table_get(params, "wr"))) {
- if (strlen(val) && strlen(val) < PROXY_WORKER_MAX_ROUTE_SIZ)
+ if (strlen(val) && strlen(val) < sizeof(wsel->s->route))
strcpy(wsel->s->route, val);
else
*wsel->s->route = '\0';
}
if ((val = apr_table_get(params, "rr"))) {
- if (strlen(val) && strlen(val) < PROXY_WORKER_MAX_ROUTE_SIZ)
+ if (strlen(val) && strlen(val) < sizeof(wsel->s->redirect))
strcpy(wsel->s->redirect, val);
else
*wsel->s->redirect = '\0';
for (n = 0; n < balancer->workers->nelts; n++) {
worker = *workers;
ap_rputs(" <httpd:worker>\n", r);
- ap_rvputs(r, " <httpd:scheme>", worker->scheme,
+ ap_rvputs(r, " <httpd:scheme>", worker->s->scheme,
"</httpd:scheme>\n", NULL);
- ap_rvputs(r, " <httpd:hostname>", worker->hostname,
+ ap_rvputs(r, " <httpd:hostname>", worker->s->hostname,
"</httpd:hostname>\n", NULL);
ap_rprintf(r, " <httpd:loadfactor>%d</httpd:loadfactor>\n",
worker->s->lbfactor);
worker = *workers;
ap_rvputs(r, "<tr>\n<td><a href=\"", r->uri, "?b=",
balancer->name + sizeof("balancer://") - 1, "&w=",
- ap_escape_uri(r->pool, worker->name),
+ ap_escape_uri(r->pool, worker->s->name),
"&nonce=", balancer_nonce,
"\">", NULL);
- ap_rvputs(r, worker->name, "</a></td>", NULL);
+ ap_rvputs(r, worker->s->name, "</a></td>", NULL);
ap_rvputs(r, "<td>", ap_escape_html(r->pool, worker->s->route),
NULL);
ap_rvputs(r, "</td><td>",
ap_rputs("<hr />\n", r);
if (wsel && bsel) {
ap_rputs("<h3>Edit worker settings for ", r);
- ap_rvputs(r, wsel->name, "</h3>\n", NULL);
+ ap_rvputs(r, wsel->s->name, "</h3>\n", NULL);
ap_rvputs(r, "<form method=\"GET\" action=\"", NULL);
ap_rvputs(r, r->uri, "\">\n<dl>", NULL);
ap_rputs("<table><tr><td>Load factor:</td><td><input name=\"lf\" type=text ", r);
ap_rputs("></td></tr>\n", r);
ap_rputs("<tr><td colspan=2><input type=submit value=\"Submit\"></td></tr>\n", r);
ap_rvputs(r, "</table>\n<input type=hidden name=\"w\" ", NULL);
- ap_rvputs(r, "value=\"", ap_escape_uri(r->pool, wsel->name), "\">\n", NULL);
+ ap_rvputs(r, "value=\"", ap_escape_uri(r->pool, wsel->s->name), "\">\n", NULL);
ap_rvputs(r, "<input type=hidden name=\"b\" ", NULL);
ap_rvputs(r, "value=\"", bsel->name + sizeof("balancer://") - 1,
"\">\n", NULL);
for (i = 0; i < conf->balancers->nelts; i++) {
apr_size_t size;
unsigned int num;
- storage->attach(&balancer->slot, balancer->name, &size, &num, p);
+ storage->attach(&(balancer->slot), balancer->name, &size, &num, p);
if (!balancer->slot) {
ap_log_error(APLOG_MARK, APLOG_NOERRNO|APLOG_EMERG, 0, s, "slotmem_attach failed");
- return !OK;
+ exit(1); /* Ugly, but we cannot continue without the shared slotmem */
}
if (balancer->lbmethod && balancer->lbmethod->reset)
balancer->lbmethod->reset(balancer, s);
ob = apr_brigade_create(r->pool, c->bucket_alloc);
while (! done) {
- apr_interval_time_t timeout = conn->worker->timeout;
+ apr_interval_time_t timeout = conn->worker->s->timeout;
apr_size_t len;
int n;
/* We need SOME kind of timeout here, or virtually anything will
* cause timeout errors. */
- if (! conn->worker->timeout_set) {
+ if (! conn->worker->s->timeout_set) {
timeout = apr_time_from_sec(30);
}
{
int status;
- const char *flush_method = worker->flusher ? worker->flusher : "flush";
+ const char *flush_method = worker->s->flusher ? worker->s->flusher : "flush";
proxy_fdpass_flush *flush = ap_lookup_provider(PROXY_FDPASS_FLUSHER,
flush_method, "0");
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
"proxy: FTP: connecting %s to %s:%d", url, connectname, connectport);
- if (worker->is_address_reusable) {
+ if (worker->s->is_address_reusable) {
if (!worker->cp->addr) {
if ((err = PROXY_THREAD_LOCK(worker)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, err, r->server,
connectname, APR_UNSPEC,
connectport, 0,
address_pool);
- if (worker->is_address_reusable && !worker->cp->addr) {
+ if (worker->s->is_address_reusable && !worker->cp->addr) {
worker->cp->addr = connect_addr;
if ((uerr = PROXY_THREAD_UNLOCK(worker)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, uerr, r->server,
* To be compliant, we only use 100-Continue for requests with bodies.
* We also make sure we won't be talking HTTP/1.0 as well.
*/
- do_100_continue = (worker->ping_timeout_set
+ do_100_continue = (worker->s->ping_timeout_set
&& ap_request_has_body(r)
&& (PROXYREQ_REVERSE == r->proxyreq)
&& !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
dconf = ap_get_module_config(r->per_dir_config, &proxy_module);
- do_100_continue = (worker->ping_timeout_set
+ do_100_continue = (worker->s->ping_timeout_set
&& ap_request_has_body(r)
&& (PROXYREQ_REVERSE == r->proxyreq)
&& !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
/* Setup for 100-Continue timeout if appropriate */
if (do_100_continue) {
apr_socket_timeout_get(backend->sock, &old_timeout);
- if (worker->ping_timeout != old_timeout) {
+ if (worker->s->ping_timeout != old_timeout) {
apr_status_t rc;
- rc = apr_socket_timeout_set(backend->sock, worker->ping_timeout);
+ rc = apr_socket_timeout_set(backend->sock, worker->s->ping_timeout);
if (rc != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rc, r->server,
"proxy: could not set 100-Continue timeout");
/* Reset to old timeout iff we've adjusted it */
if (do_100_continue
&& (r->status == HTTP_CONTINUE)
- && (worker->ping_timeout != old_timeout)) {
+ && (worker->s->ping_timeout != old_timeout)) {
apr_socket_timeout_set(backend->sock, old_timeout);
}
}
* left waiting for a slow client to eventually
* acknowledge the data.
*/
- ap_proxy_release_connection(backend->worker->scheme,
+ ap_proxy_release_connection(backend->worker->s->scheme,
backend, r->server);
/* Ensure that the backend is not reused */
*backend_ptr = NULL;
* left waiting for a slow client to eventually
* acknowledge the data.
*/
- ap_proxy_release_connection(backend->worker->scheme,
+ ap_proxy_release_connection(backend->worker->s->scheme,
backend, r->server);
*backend_ptr = NULL;
*/
if ((status = ap_proxy_http_request(p, r, backend, worker,
conf, uri, locurl, server_portstr)) != OK) {
- if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->ping_timeout_set) {
+ if ((status == HTTP_SERVICE_UNAVAILABLE) && worker->s->ping_timeout_set) {
backend->close = 1;
ap_log_error(APLOG_MARK, APLOG_INFO, status, r->server,
"proxy: HTTP: 100-Continue failed to %pI (%s)",
- worker->cp->addr, worker->hostname);
+ worker->cp->addr, worker->s->hostname);
retry++;
continue;
} else {
* or may not be the right one... basically, we need
* to find which member actually handled this request.
*/
- bname = ap_proxy_valid_balancer_name(real);
+ bname = ap_proxy_valid_balancer_name((char *)real);
if (bname && (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
int n, l3 = 0;
proxy_worker **worker = (proxy_worker **)balancer->workers->elts;
* translate url http://example.com/foo/bar/that to /bash/that
*/
for (n = 0; n < balancer->workers->nelts; n++) {
- l2 = strlen((*worker)->name);
+ l2 = strlen((*worker)->s->name);
if (urlpart) {
/* urlpart (l3) assuredly starts with its own '/' */
- if ((*worker)->name[l2 - 1] == '/')
+ if ((*worker)->s->name[l2 - 1] == '/')
--l2;
if (l1 >= l2 + l3
- && strncasecmp((*worker)->name, url, l2) == 0
+ && strncasecmp((*worker)->s->name, url, l2) == 0
&& strncmp(urlpart, url + l2, l3) == 0) {
u = apr_pstrcat(r->pool, ent[i].fake, &url[l2 + l3],
NULL);
return ap_construct_url(r->pool, u, r);
}
}
- else if (l1 >= l2 && strncasecmp((*worker)->name, url, l2) == 0) {
+ else if (l1 >= l2 && strncasecmp((*worker)->s->name, url, l2) == 0) {
u = apr_pstrcat(r->pool, ent[i].fake, &url[l2], NULL);
return ap_construct_url(r->pool, u, r);
}
* so, returns ptr to the actual name (BALANCER_PREFIX removed),
* otherwise NULL
*/
-PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(const char *name)
+PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(char *name)
{
- if (strncasecmp(name, BALANCER_PREFIX, sizeof(BALANCER_PREFIX)) == 0)
- return (name + sizeof(BALANCER_PREFIX));
+ if (strncasecmp(name, BALANCER_PREFIX, sizeof(BALANCER_PREFIX)-1) == 0)
+ return (name + sizeof(BALANCER_PREFIX)-1);
else
return NULL;
}
return NULL;
}
-PROXY_DECLARE(const char *) ap_proxy_define_balancer(proxy_balancer **balancer,
- apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url)
+PROXY_DECLARE(char *) ap_proxy_define_balancer(apr_pool_t *p,
+ proxy_balancer **balancer,
+ proxy_server_conf *conf,
+ const char *url)
{
char *name, *q, *uri = apr_pstrdup(p, url);
proxy_balancer_method *lbmethod;
/* We should never get here without a valid BALANCER_PREFIX... */
- if (!(name = ap_proxy_valid_balancer_name(uri)))
- return "Bad syntax for a balancer name";
+ name = ap_proxy_valid_balancer_name(uri);
+ if (!name)
+ return apr_pstrcat(p, "Bad syntax for a balancer name ", uri, NULL);
/* remove path from uri */
if ((q = strchr(name, '/')))
* Create an already defined balancer and free up memory.
* Placeholder for when we make +/- of balancers runtime as well
*/
-PROXY_DECLARE(void) ap_proxy_create_balancer(TODO)
+PROXY_DECLARE(void) ap_proxy_share_balancer(TODO)
{
}
ap_log_perror(APLOG_MARK, APLOG_ERR, 0, conn->pool,
"proxy: Pooled connection 0x%pp for worker %s has been"
" already returned to the connection pool.", conn,
- worker->name);
+ worker->s->name);
return APR_SUCCESS;
}
apr_pool_tag(conn->scpool, "proxy_conn_scpool");
}
- if (worker->hmax && worker->cp->res) {
+ if (worker->s->hmax && worker->cp->res) {
conn->inreslist = 1;
apr_reslist_release(worker->cp->res, (void *)conn);
}
*/
PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
+ proxy_balancer *balancer,
proxy_server_conf *conf,
- proxy_balancer *balancer
const char *url)
{
proxy_worker *worker;
* scheme://hostname[:port] matches between worker and url.
*/
for (i = 0; i < end; i++) {
- if ( ((worker_name_length = strlen(worker->name)) <= url_length)
+ if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
&& (worker_name_length >= min_match)
&& (worker_name_length > max_match)
- && (strncmp(url_copy, worker->name, worker_name_length) == 0) ) {
+ && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
max_worker = worker;
max_match = worker_name_length;
}
* shared. This allows for dynamic addition during
* config and runtime.
*/
-PROXY_DECLARE(const char *) ap_proxy_define_worker(apr_pool_t *p,
+PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
proxy_worker **worker,
proxy_balancer *balancer,
proxy_server_conf *conf,
*/
if (balancer)
*worker = apr_array_push(balancer->workers);
- else
- *worker = apr_array_push(conf->workers)
- memset(*worker, 0, sizeof(proxy_worker));
+ else if (conf)
+ *worker = apr_array_push(conf->workers);
+ else {
+ proxy_worker *w = apr_palloc(p, sizeof(proxy_worker));
+ *worker = w;
+ }
+
+ memset(*worker, 0, sizeof(proxy_worker));
/* right here we just want to tuck away the worker info.
* if called during config, we don't have shm setup yet,
* so just note the info for later. */
- wstatus = malloc(sizeof(proxy_worker_shared);) /* will be freed ap_proxy_create_worker */
- memset(*wstatus, 0, sizeof(proxy_worker_shared));
+    wstatus = malloc(sizeof(proxy_worker_shared)); /* will be freed in ap_proxy_share_worker() */
+ memset(wstatus, 0, sizeof(proxy_worker_shared));
- (*worker)->hash = ap_proxy_hashfunc((*worker)->name, PROXY_HASHFUNC_DEFAULT);
- (*worker)->cp = NULL;
- (*worker)->mutex = NULL;
- (*worker)->balancer = balancer;
PROXY_STRNCPY(wstatus->name, apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD));
PROXY_STRNCPY(wstatus->scheme, uri.scheme);
wstatus->flush_packets = flush_off;
wstatus->flush_wait = PROXY_FLUSH_WAIT;
wstatus->smax = -1;
- wstatus->hash = (*worker)->hash;
+ wstatus->hash = ap_proxy_hashfunc(wstatus->name, PROXY_HASHFUNC_DEFAULT);
+
+ (*worker)->hash = wstatus->hash;
+ (*worker)->cp = NULL;
+ (*worker)->mutex = NULL;
+ (*worker)->balancer = balancer;
(*worker)->s = wstatus;
/*
* Create an already defined worker and free up memory
*/
-PROXY_DECLARE(void) ap_proxy_create_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
+PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
{
memcpy(shm, worker->s, sizeof(proxy_worker_shared));
free(worker->s); /* was malloced in ap_proxy_define_worker */
if (worker->s->hmax == 0 || worker->s->hmax > mpm_threads) {
worker->s->hmax = mpm_threads;
}
- if (worker->s->smax == -1 || worker->s->smax > worker->hmax) {
+ if (worker->s->smax == -1 || worker->s->smax > worker->s->hmax) {
worker->s->smax = worker->s->hmax;
}
/* Set min to be lower then smax */
/* This will supress the apr_reslist creation */
worker->s->min = worker->s->smax = worker->s->hmax = 0;
}
- if (worker->hmax) {
+ if (worker->s->hmax) {
rv = apr_reslist_create(&(worker->cp->res),
- worker->min, worker->smax,
- worker->hmax, worker->ttl,
+ worker->s->min, worker->s->smax,
+ worker->s->hmax, worker->s->ttl,
connection_constructor, connection_destructor,
worker, worker->cp->pool);
getpid(), worker->s->hostname, worker->s->min,
worker->s->hmax, worker->s->smax);
-#if (APR_MAJOR_VERSION > 0)
/* Set the acquire timeout */
if (rv == APR_SUCCESS && worker->s->acquire_set) {
apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
}
-#endif
+
}
else
{
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
- getpid(), worker->hostname);
+ getpid(), worker->s->hostname);
}
if (rv == APR_SUCCESS) {
worker->s->status |= (PROXY_WORKER_INITIALIZED);
server_rec *s)
{
if (worker->s->status & PROXY_WORKER_IN_ERROR) {
- if (apr_time_now() > worker->s->error_time + worker->retry) {
+ if (apr_time_now() > worker->s->error_time + worker->s->retry) {
++worker->s->retries;
worker->s->status &= ~PROXY_WORKER_IN_ERROR;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: %s: worker for (%s) has been marked for retry",
- proxy_function, worker->hostname);
+ proxy_function, worker->s->hostname);
return OK;
}
else {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: %s: too soon to retry worker for (%s)",
- proxy_function, worker->hostname);
+ proxy_function, worker->s->hostname);
return DECLINED;
}
}
access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
if (access_status == DECLINED && *balancer == NULL) {
- *worker = ap_proxy_get_worker(r->pool, conf, *url);
+ *worker = ap_proxy_get_worker(r->pool, NULL, conf, *url);
if (*worker) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
"proxy: %s: found worker %s for %s",
- (*worker)->scheme, (*worker)->name, *url);
+ (*worker)->s->scheme, (*worker)->s->name, *url);
*balancer = NULL;
access_status = OK;
if (!PROXY_WORKER_IS_USABLE(worker)) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
"proxy: %s: disabled connection for (%s)",
- proxy_function, worker->hostname);
+ proxy_function, worker->s->hostname);
return HTTP_SERVICE_UNAVAILABLE;
}
}
- if (worker->hmax && worker->cp->res) {
+ if (worker->s->hmax && worker->cp->res) {
rv = apr_reslist_acquire(worker->cp->res, (void **)conn);
}
else
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
"proxy: %s: failed to acquire connection for (%s)",
- proxy_function, worker->hostname);
+ proxy_function, worker->s->hostname);
return HTTP_SERVICE_UNAVAILABLE;
}
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: %s: has acquired connection for (%s)",
- proxy_function, worker->hostname);
+ proxy_function, worker->s->hostname);
(*conn)->worker = worker;
(*conn)->close = 0;
{
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
"proxy: %s: has released connection for (%s)",
- proxy_function, conn->worker->hostname);
+ proxy_function, conn->worker->s->hostname);
connection_cleanup(conn);
return OK;
*
* TODO: Handle this much better...
*/
- if (!conn->hostname || !worker->is_address_reusable ||
- worker->disablereuse ||
+ if (!conn->hostname || !worker->s->is_address_reusable ||
+ worker->s->disablereuse ||
(r->connection->keepalives &&
(r->proxyreq == PROXYREQ_PROXY || r->proxyreq == PROXYREQ_REVERSE) &&
(strcasecmp(conn->hostname, uri->hostname) != 0) ) ) {
"proxy: %s: error creating fam %d socket for target %s",
proxy_function,
backend_addr->family,
- worker->hostname);
+ worker->s->hostname);
/*
* this could be an IPv6 address from the DNS but the
* local machine won't give us an IPv6 socket; hopefully the
}
conn->connection = NULL;
- if (worker->recv_buffer_size > 0 &&
+ if (worker->s->recv_buffer_size > 0 &&
(rv = apr_socket_opt_set(newsock, APR_SO_RCVBUF,
- worker->recv_buffer_size))) {
+ worker->s->recv_buffer_size))) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
"apr_socket_opt_set(SO_RCVBUF): Failed to set "
"ProxyReceiveBufferSize, using default");
}
/* Set a timeout for connecting to the backend on the socket */
- if (worker->conn_timeout_set) {
- apr_socket_timeout_set(newsock, worker->conn_timeout);
+ if (worker->s->conn_timeout_set) {
+ apr_socket_timeout_set(newsock, worker->s->conn_timeout);
}
- else if (worker->timeout_set) {
- apr_socket_timeout_set(newsock, worker->timeout);
+ else if (worker->s->timeout_set) {
+ apr_socket_timeout_set(newsock, worker->s->timeout);
}
else if (conf->timeout_set) {
apr_socket_timeout_set(newsock, conf->timeout);
apr_socket_timeout_set(newsock, s->timeout);
}
/* Set a keepalive option */
- if (worker->keepalive) {
+ if (worker->s->keepalive) {
if ((rv = apr_socket_opt_set(newsock,
APR_SO_KEEPALIVE, 1)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
}
ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s,
"proxy: %s: fam %d socket created to connect to %s",
- proxy_function, backend_addr->family, worker->hostname);
+ proxy_function, backend_addr->family, worker->s->hostname);
if (conf->source_address_set) {
local_addr = apr_pcalloc(conn->pool, sizeof(apr_sockaddr_t));
"proxy: %s: attempt to connect to %pI (%s) failed",
proxy_function,
backend_addr,
- worker->hostname);
+ worker->s->hostname);
backend_addr = backend_addr->next;
continue;
}
/* Set a timeout on the socket */
- if (worker->timeout_set) {
- apr_socket_timeout_set(newsock, worker->timeout);
+ if (worker->s->timeout_set) {
+ apr_socket_timeout_set(newsock, worker->s->timeout);
}
else if (conf->timeout_set) {
apr_socket_timeout_set(newsock, conf->timeout);
"via http CONNECT through %pI (%s) failed",
proxy_function,
forward->target_host, forward->target_port,
- backend_addr, worker->hostname);
+ backend_addr, worker->s->hostname);
backend_addr = backend_addr->next;
continue;
}
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
"ap_proxy_connect_backend disabling worker for (%s) for %"
APR_TIME_T_FMT "s",
- worker->hostname, apr_time_sec(worker->retry));
+ worker->s->hostname, apr_time_sec(worker->s->retry));
}
else {
if (worker->s->retries) {