/* return the sizeof of one lb_worker in scoreboard. */
static int ap_proxy_lb_worker_size(void)
{
- return sizeof(proxy_worker_stat);
+ return sizeof(proxy_worker_shared);
}
/*
#include "apr_reslist.h"
#define APR_WANT_STRFUNC
#include "apr_want.h"
+#include "apr_global_mutex.h"
#include "httpd.h"
#include "http_config.h"
apr_array_header_t *aliases;
apr_array_header_t *noproxies;
apr_array_header_t *dirconn;
- apr_array_header_t *workers;
- apr_array_header_t *balancers;
+    apr_array_header_t *workers; /* non-balancer workers, e.g. ProxyPass http://example.com */
+ apr_array_header_t *balancers; /* list of balancers @ config time */
proxy_worker *forward; /* forward proxy worker */
proxy_worker *reverse; /* reverse "module-driven" proxy worker */
const char *domain; /* domain name to use in absence of a domain name in the request */
status_full
} proxy_status; /* Status display options */
apr_sockaddr_t *source_address;
-
+ apr_global_mutex_t *mutex; /* global lock for updating lb params */
+
int req_set:1;
int viaopt_set:1;
int recv_buffer_size_set:1;
/* default worker retry timeout in seconds */
#define PROXY_WORKER_DEFAULT_RETRY 60
-#define PROXY_WORKER_MAX_ROUTE_SIZ 63
+#define PROXY_WORKER_MAX_SCHEME_SIZE 16
+#define PROXY_WORKER_MAX_ROUTE_SIZE 64
+#define PROXY_WORKER_MAX_NAME_SIZE 96
+
/* Runtime worker status informations. Shared in scoreboard */
typedef struct {
- apr_time_t error_time; /* time of the last error */
+ char name[PROXY_WORKER_MAX_NAME_SIZE];
+ char scheme[PROXY_WORKER_MAX_SCHEME_SIZE]; /* scheme to use ajp|http|https */
+ char hostname[PROXY_WORKER_MAX_ROUTE_SIZE]; /* remote backend address */
+ char route[PROXY_WORKER_MAX_ROUTE_SIZE]; /* balancing route */
+ char redirect[PROXY_WORKER_MAX_ROUTE_SIZE]; /* temporary balancing redirection route */
+ char flusher[PROXY_WORKER_MAX_SCHEME_SIZE]; /* flush provider used by mod_proxy_fdpass */
+ int lbset; /* load balancer cluster set */
int status;
int retries; /* number of retries on this worker */
int lbstatus; /* Current lbstatus */
int lbfactor; /* dynamic lbfactor */
- apr_off_t transferred;/* Number of bytes transferred to remote */
- apr_off_t read; /* Number of bytes read from remote */
- apr_size_t elected; /* Number of times the worker was elected */
- char route[PROXY_WORKER_MAX_ROUTE_SIZ+1];
- char redirect[PROXY_WORKER_MAX_ROUTE_SIZ+1];
- void *context; /* general purpose storage */
- apr_size_t busy; /* busyness factor */
- int lbset; /* load balancer cluster set */
- unsigned int apr_hash; /* hash #0 of worker name */
- unsigned int our_hash; /* hash #1 of worker name. Why 2? hash collisions. */
-} proxy_worker_stat;
-
-/* Worker configuration */
-struct proxy_worker {
- const char *name;
- const char *scheme; /* scheme to use ajp|http|https */
- const char *hostname; /* remote backend address */
- const char *route; /* balancing route */
- const char *redirect; /* temporary balancing redirection route */
- int id; /* scoreboard id */
- int status; /* temporary worker status */
- int lbfactor; /* initial load balancing factor */
- int lbset; /* load balancer cluster set */
int min; /* Desired minimum number of available connections */
int smax; /* Soft maximum on the total number of connections */
int hmax; /* Hard maximum on the total number of connections */
+ int flush_wait; /* poll wait time in microseconds if flush_auto */
+ int index; /* shm array index */
+ unsigned int apr_hash; /* hash #0 of worker name */
+ unsigned int our_hash; /* hash #1 of worker name. Why 2? hash collisions. */
+ enum {
+ flush_off,
+ flush_on,
+ flush_auto
+ } flush_packets; /* control AJP flushing */
+ apr_time_t error_time; /* time of the last error */
apr_interval_time_t ttl; /* maximum amount of time in seconds a connection
* may be available while exceeding the soft limit */
- apr_interval_time_t retry; /* retry interval */
+ apr_interval_time_t retry; /* retry interval */
apr_interval_time_t timeout; /* connection timeout */
apr_interval_time_t acquire; /* acquire timeout when the maximum number of connections is exceeded */
apr_interval_time_t ping_timeout;
apr_interval_time_t conn_timeout;
apr_size_t recv_buffer_size;
apr_size_t io_buffer_size;
+ apr_size_t elected; /* Number of times the worker was elected */
+ apr_size_t busy; /* busyness factor */
apr_port_t port;
- char keepalive;
- char disablereuse;
- int is_address_reusable:1;
- proxy_conn_pool *cp; /* Connection pool to use */
- proxy_worker_stat *s; /* Shared data */
- void *opaque; /* per scheme worker data */
+ apr_off_t transferred;/* Number of bytes transferred to remote */
+ apr_off_t read; /* Number of bytes read from remote */
+ void *context; /* general purpose storage */
+ unsigned int keepalive:1;
+ unsigned int disablereuse:1;
+ unsigned int is_address_reusable:1;
+ unsigned int retry_set:1;
+ unsigned int timeout_set:1;
+ unsigned int acquire_set:1;
+ unsigned int ping_timeout_set:1;
+ unsigned int conn_timeout_set:1;
+ unsigned int recv_buffer_size_set:1;
+ unsigned int io_buffer_size_set:1;
+ unsigned int keepalive_set:1;
+ unsigned int disablereuse_set:1;
+} proxy_worker_shared;
+
+/* Worker configuration */
+struct proxy_worker {
+ proxy_conn_pool *cp; /* Connection pool to use */
+ proxy_worker_shared *s; /* Shared data */
void *context; /* general purpose storage */
- enum {
- flush_off,
- flush_on,
- flush_auto
- } flush_packets; /* control AJP flushing */
- int flush_wait; /* poll wait time in microseconds if flush_auto */
- const char *flusher; /* flush provider used by mod_proxy_fdpass */
#if APR_HAS_THREADS
- apr_thread_mutex_t *mutex; /* Thread lock for updating address cache */
+ apr_thread_mutex_t *mutex; /* Thread lock for updating address cache */
#endif
-
- int retry_set:1;
- int timeout_set:1;
- int acquire_set:1;
- int ping_timeout_set:1;
- int conn_timeout_set:1;
- int recv_buffer_size_set:1;
- int io_buffer_size_set:1;
- int keepalive_set:1;
- int disablereuse_set:1;
- unsigned int apr_hash; /* hash #0 of worker name */
- unsigned int our_hash; /* hash #1 of worker name. Why 2? hash collisions. */
};
/*
#define PROXY_FLUSH_WAIT 10000
struct proxy_balancer {
- apr_array_header_t *workers; /* array of proxy_workers */
+ apr_array_header_t *cw; /* initially configured workers */
+    proxy_worker **workers; /* array of proxy_workers - runtime */
+ int max_workers; /* maximum number of allowed workers */
const char *name; /* name of the load balancer */
apr_interval_time_t timeout; /* Timeout for waiting on free connection */
+ const char *lbprovider; /* name of the lbmethod provider to use */
proxy_balancer_method *lbmethod;
const char *sticky_path; /* URL sticky session identifier */
int sticky_force:1; /* Disable failover for sticky sessions */
int scolonsep:1; /* true if ';' seps sticky session paths */
int max_attempts_set:1;
-#if APR_HAS_THREADS
- apr_thread_mutex_t *mutex; /* Thread lock for updating lb params */
-#endif
- void *context; /* general purpose storage */
- apr_time_t updated; /* timestamp of last update */
+ void *context; /* general purpose storage */
+ apr_time_t updated; /* timestamp of last update */
};
struct proxy_balancer_method {
#define PROXY_THREAD_UNLOCK(x) APR_SUCCESS
#endif
+#define PROXY_GLOBAL_LOCK(x) apr_global_mutex_lock((x)->mutex)
+#define PROXY_GLOBAL_UNLOCK(x) apr_global_mutex_unlock((x)->mutex)
+
/* hooks */
/* Create a set of PROXY_DECLARE(type), PROXY_DECLARE_NONSTD(type) and
#include "apr_uuid.h"
#include "apr_date.h"
+static const char *balancer_mutex_type = "proxy-balancer-shm";
+
module AP_MODULE_DECLARE_DATA proxy_balancer_module;
static char balancer_nonce[APR_UUID_FORMATTED_LENGTH + 1];
+/*
+ * Register our mutex type before the config is read so we
+ * can adjust the mutex settings using the Mutex directive.
+ */
+static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
+ apr_pool_t *ptemp)
+{
+ ap_mutex_register(pconf, balancer_mutex_type, NULL, APR_LOCK_DEFAULT, 0);
+ return OK;
+}
+
#if 0
extern void proxy_update_members(proxy_balancer **balancer, request_rec *r,
- proxy_server_conf *conf);
+ proxy_server_conf *conf);
#endif
static int proxy_balancer_canon(request_rec *r, char *url)
int worker_is_initialized;
worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(*workers);
if (!worker_is_initialized) {
- proxy_worker_stat *slot;
+ proxy_worker_shared *slot;
/*
* If the worker is not initialized check whether its scoreboard
* slot is already initialized.
*/
- slot = (proxy_worker_stat *) ap_get_scoreboard_lb((*workers)->id);
+ slot = (proxy_worker_shared *) ap_get_scoreboard_lb((*workers)->id);
if (slot) {
worker_is_initialized = slot->status & PROXY_WORKER_INITIALIZED;
}
{
proxy_worker *candidate = NULL;
apr_status_t rv;
+ proxy_server_conf *conf = (proxy_server_conf *)
+ ap_get_module_config(r->server->module_config, &proxy_module);
- if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Lock failed for find_best_worker()", balancer->name);
return NULL;
candidate->s->elected++;
/*
- PROXY_THREAD_UNLOCK(balancer);
+ PROXY_GLOBAL_UNLOCK(conf);
return NULL;
*/
- if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for find_best_worker()", balancer->name);
}
/* Step 2: Lock the LoadBalancer
* XXX: perhaps we need the process lock here
*/
- if ((rv = PROXY_THREAD_LOCK(*balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Lock failed for pre_request",
(*balancer)->name);
ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
"proxy: BALANCER: (%s). All workers are in error state for route (%s)",
(*balancer)->name, route);
- if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for pre_request",
(*balancer)->name);
}
}
- if ((rv = PROXY_THREAD_UNLOCK(*balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for pre_request",
(*balancer)->name);
apr_status_t rv;
- if ((rv = PROXY_THREAD_LOCK(balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_LOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Lock failed for post_request",
balancer->name);
}
}
- if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
+ if ((rv = PROXY_GLOBAL_UNLOCK(conf)) != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
"proxy: BALANCER: (%s). Unlock failed for post_request",
balancer->name);
}
/* post_config hook: */
-static int balancer_init(apr_pool_t *p, apr_pool_t *plog,
+static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
apr_uuid_t uuid;
void *data;
+ apr_status_t rv;
+ void *sconf = s->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
const char *userdata_key = "mod_proxy_balancer_init";
/* balancer_init() will be called twice during startup. So, only
* set up the static data the 1st time through. */
apr_pool_userdata_get(&data, userdata_key, s->process->pool);
if (!data) {
- /* Retrieve a UUID and store the nonce for the lifetime of
- * the process. */
- apr_uuid_get(&uuid);
- apr_uuid_format(balancer_nonce, &uuid);
apr_pool_userdata_set((const void *)1, userdata_key,
apr_pool_cleanup_null, s->process->pool);
}
+ /* Retrieve a UUID and store the nonce for the lifetime of
+ * the process. */
+ apr_uuid_get(&uuid);
+ apr_uuid_format(balancer_nonce, &uuid);
+
+ /* Create global mutex */
+ rv = ap_global_mutex_create(&conf->mutex, NULL, balancer_mutex_type, NULL,
+ s, pconf, 0);
+ if (rv != APR_SUCCESS) {
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
return OK;
}
return OK;
}
-static void child_init(apr_pool_t *p, server_rec *s)
+static void balancer_child_init(apr_pool_t *p, server_rec *s)
{
while (s) {
- void *sconf = s->module_config;
- proxy_server_conf *conf;
proxy_balancer *balancer;
int i;
- conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
-
+ void *sconf = s->module_config;
+ proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
+ apr_status_t rv;
+
+ /* Re-open the mutex for the child. */
+ rv = apr_global_mutex_child_init(&conf->mutex,
+ apr_global_mutex_lockfile(conf->mutex),
+ p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s,
+ "Failed to reopen mutex %s in child",
+ balancer_mutex_type);
+ exit(1); /* Ugly, but what else? */
+ }
+
/* Initialize shared scoreboard data */
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++) {
*/
static const char *const aszPred[] = { "mpm_winnt.c", NULL};
/* manager handler */
- ap_hook_post_config(balancer_init, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_post_config(balancer_post_config, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_pre_config(balancer_pre_config, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_handler(balancer_handler, NULL, NULL, APR_HOOK_FIRST);
- ap_hook_child_init(child_init, aszPred, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_init(balancer_child_init, aszPred, NULL, APR_HOOK_MIDDLE);
proxy_hook_pre_request(proxy_balancer_pre_request, NULL, NULL, APR_HOOK_FIRST);
proxy_hook_post_request(proxy_balancer_post_request, NULL, NULL, APR_HOOK_FIRST);
proxy_hook_canon_handler(proxy_balancer_canon, NULL, NULL, APR_HOOK_FIRST);
proxy_worker *worker,
server_rec *s)
{
- proxy_worker_stat *score = NULL;
+ proxy_worker_shared *score = NULL;
if (PROXY_WORKER_IS_INITIALIZED(worker)) {
/* The worker share is already initialized */
if (!worker->s) {
/* Get scoreboard slot */
if (ap_scoreboard_image) {
- score = (proxy_worker_stat *) ap_get_scoreboard_lb(worker->id);
+ score = (proxy_worker_shared *) ap_get_scoreboard_lb(worker->id);
if (!score) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
"proxy: ap_get_scoreboard_lb(%d) failed in child %" APR_PID_T_FMT " for worker %s",
}
}
if (!score) {
- score = (proxy_worker_stat *) apr_pcalloc(conf->pool, sizeof(proxy_worker_stat));
+ score = (proxy_worker_shared *) apr_pcalloc(conf->pool, sizeof(proxy_worker_shared));
ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
"proxy: initialized plain memory in child %" APR_PID_T_FMT " for worker %s",
getpid(), worker->name);