#define PROXY_COPY_CONF_PARAMS(w, c) \
do { \
- (w)->timeout = (c)->timeout; \
- (w)->timeout_set = (c)->timeout_set; \
- (w)->recv_buffer_size = (c)->recv_buffer_size; \
- (w)->recv_buffer_size_set = (c)->recv_buffer_size_set; \
- (w)->io_buffer_size = (c)->io_buffer_size; \
- (w)->io_buffer_size_set = (c)->io_buffer_size_set; \
+ (w)->s->timeout = (c)->timeout; \
+ (w)->s->timeout_set = (c)->timeout_set; \
+ (w)->s->recv_buffer_size = (c)->recv_buffer_size; \
+ (w)->s->recv_buffer_size_set = (c)->recv_buffer_size_set; \
+ (w)->s->io_buffer_size = (c)->io_buffer_size; \
+ (w)->s->io_buffer_size_set = (c)->io_buffer_size_set; \
} while (0)
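For orientation, a minimal usage sketch of the reworked macro (the actual call site is outside this hunk, so the variable names below are assumptions): the per-server defaults now land in the worker's shared block rather than in the proxy_worker itself.

    /* fragment: 'worker' is a proxy_worker* whose ->s block is already allocated,
     * 'conf' a proxy_server_conf* carrying the per-server defaults. */
    PROXY_COPY_CONF_PARAMS(worker, conf);   /* fills worker->s->timeout, buffer sizes, ... */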
static const char *set_worker_param(apr_pool_t *p,
/* Normalized load factor. Used with BalancerMember,
* it is a number between 1 and 100.
*/
- worker->lbfactor = atoi(val);
- if (worker->lbfactor < 1 || worker->lbfactor > 100)
+ worker->s->lbfactor = atoi(val);
+ if (worker->s->lbfactor < 1 || worker->s->lbfactor > 100)
return "LoadFactor must be number between 1..100";
}
else if (!strcasecmp(key, "retry")) {
ival = atoi(val);
if (ival < 0)
return "Retry must be a positive value";
- worker->retry = apr_time_from_sec(ival);
- worker->retry_set = 1;
+ worker->s->retry = apr_time_from_sec(ival);
+ worker->s->retry_set = 1;
}
else if (!strcasecmp(key, "ttl")) {
/* Time in seconds that will destroy all the connections
ival = atoi(val);
if (ival < 1)
return "TTL must be at least one second";
- worker->ttl = apr_time_from_sec(ival);
+ worker->s->ttl = apr_time_from_sec(ival);
}
else if (!strcasecmp(key, "min")) {
/* Initial number of connections to remote
ival = atoi(val);
if (ival < 0)
return "Min must be a positive number";
- worker->min = ival;
+ worker->s->min = ival;
}
else if (!strcasecmp(key, "max")) {
/* Maximum number of connections to remote
ival = atoi(val);
if (ival < 0)
return "Max must be a positive number";
- worker->hmax = ival;
+ worker->s->hmax = ival;
}
/* XXX: More intelligent naming needed */
else if (!strcasecmp(key, "smax")) {
ival = atoi(val);
if (ival < 0)
return "Smax must be a positive number";
- worker->smax = ival;
+ worker->s->smax = ival;
}
else if (!strcasecmp(key, "acquire")) {
/* Acquire timeout in given unit (default is milliseconds).
return "Acquire timeout has wrong format";
if (timeout < 1000)
return "Acquire must be at least one millisecond";
- worker->acquire = timeout;
- worker->acquire_set = 1;
+ worker->s->acquire = timeout;
+ worker->s->acquire_set = 1;
}
else if (!strcasecmp(key, "timeout")) {
/* Connection timeout in seconds.
ival = atoi(val);
if (ival < 1)
return "Timeout must be at least one second";
- worker->timeout = apr_time_from_sec(ival);
- worker->timeout_set = 1;
+ worker->s->timeout = apr_time_from_sec(ival);
+ worker->s->timeout_set = 1;
}
else if (!strcasecmp(key, "iobuffersize")) {
long s = atol(val);
if (s < 512 && s) {
return "IOBufferSize must be >= 512 bytes, or 0 for system default.";
}
- worker->io_buffer_size = (s ? s : AP_IOBUFSIZE);
- worker->io_buffer_size_set = 1;
+ worker->s->io_buffer_size = (s ? s : AP_IOBUFSIZE);
+ worker->s->io_buffer_size_set = 1;
}
else if (!strcasecmp(key, "receivebuffersize")) {
ival = atoi(val);
if (ival < 512 && ival != 0) {
return "ReceiveBufferSize must be >= 512 bytes, or 0 for system default.";
}
- worker->recv_buffer_size = ival;
- worker->recv_buffer_size_set = 1;
+ worker->s->recv_buffer_size = ival;
+ worker->s->recv_buffer_size_set = 1;
}
else if (!strcasecmp(key, "keepalive")) {
if (!strcasecmp(val, "on"))
- worker->keepalive = 1;
+ worker->s->keepalive = 1;
else if (!strcasecmp(val, "off"))
- worker->keepalive = 0;
+ worker->s->keepalive = 0;
else
return "KeepAlive must be On|Off";
- worker->keepalive_set = 1;
+ worker->s->keepalive_set = 1;
}
else if (!strcasecmp(key, "disablereuse")) {
if (!strcasecmp(val, "on"))
- worker->disablereuse = 1;
+ worker->s->disablereuse = 1;
else if (!strcasecmp(val, "off"))
- worker->disablereuse = 0;
+ worker->s->disablereuse = 0;
else
return "DisableReuse must be On|Off";
- worker->disablereuse_set = 1;
+ worker->s->disablereuse_set = 1;
}
else if (!strcasecmp(key, "route")) {
/* Worker route.
*/
- if (strlen(val) > PROXY_WORKER_MAX_ROUTE_SIZ)
+ if (strlen(val) >= PROXY_WORKER_MAX_ROUTE_SIZE)
return "Route length must be < 64 characters";
- worker->route = apr_pstrdup(p, val);
+ PROXY_STRNCPY(worker->s->route, val);
}
else if (!strcasecmp(key, "redirect")) {
/* Worker redirection route.
*/
- if (strlen(val) > PROXY_WORKER_MAX_ROUTE_SIZ)
+ if (strlen(val) >= PROXY_WORKER_MAX_ROUTE_SIZE)
return "Redirect length must be < 64 characters";
- worker->redirect = apr_pstrdup(p, val);
+ PROXY_STRNCPY(worker->s->redirect, val);
}
else if (!strcasecmp(key, "status")) {
const char *v;
}
if (*v == 'D' || *v == 'd') {
if (mode)
- worker->status |= PROXY_WORKER_DISABLED;
+ worker->s->status |= PROXY_WORKER_DISABLED;
else
- worker->status &= ~PROXY_WORKER_DISABLED;
+ worker->s->status &= ~PROXY_WORKER_DISABLED;
}
else if (*v == 'S' || *v == 's') {
if (mode)
- worker->status |= PROXY_WORKER_STOPPED;
+ worker->s->status |= PROXY_WORKER_STOPPED;
else
- worker->status &= ~PROXY_WORKER_STOPPED;
+ worker->s->status &= ~PROXY_WORKER_STOPPED;
}
else if (*v == 'E' || *v == 'e') {
if (mode)
- worker->status |= PROXY_WORKER_IN_ERROR;
+ worker->s->status |= PROXY_WORKER_IN_ERROR;
else
- worker->status &= ~PROXY_WORKER_IN_ERROR;
+ worker->s->status &= ~PROXY_WORKER_IN_ERROR;
}
else if (*v == 'H' || *v == 'h') {
if (mode)
- worker->status |= PROXY_WORKER_HOT_STANDBY;
+ worker->s->status |= PROXY_WORKER_HOT_STANDBY;
else
- worker->status &= ~PROXY_WORKER_HOT_STANDBY;
+ worker->s->status &= ~PROXY_WORKER_HOT_STANDBY;
}
else if (*v == 'I' || *v == 'i') {
if (mode)
- worker->status |= PROXY_WORKER_IGNORE_ERRORS;
+ worker->s->status |= PROXY_WORKER_IGNORE_ERRORS;
else
- worker->status &= ~PROXY_WORKER_IGNORE_ERRORS;
+ worker->s->status &= ~PROXY_WORKER_IGNORE_ERRORS;
}
else {
return "Unknown status parameter option";
}
else if (!strcasecmp(key, "flushpackets")) {
if (!strcasecmp(val, "on"))
- worker->flush_packets = flush_on;
+ worker->s->flush_packets = flush_on;
else if (!strcasecmp(val, "off"))
- worker->flush_packets = flush_off;
+ worker->s->flush_packets = flush_off;
else if (!strcasecmp(val, "auto"))
- worker->flush_packets = flush_auto;
+ worker->s->flush_packets = flush_auto;
else
return "flushpackets must be on|off|auto";
}
return "flushwait must be <= 1000, or 0 for system default of 10 milliseconds.";
}
if (ival == 0)
- worker->flush_wait = PROXY_FLUSH_WAIT;
+ worker->s->flush_wait = PROXY_FLUSH_WAIT;
else
- worker->flush_wait = ival * 1000; /* change to microseconds */
+ worker->s->flush_wait = ival * 1000; /* change to microseconds */
}
else if (!strcasecmp(key, "ping")) {
/* Ping/Pong timeout in given unit (default is second).
return "Ping/Pong timeout has wrong format";
if (timeout < 1000)
return "Ping/Pong timeout must be at least one millisecond";
- worker->ping_timeout = timeout;
- worker->ping_timeout_set = 1;
+ worker->s->ping_timeout = timeout;
+ worker->s->ping_timeout_set = 1;
}
else if (!strcasecmp(key, "lbset")) {
ival = atoi(val);
if (ival < 0 || ival > 99)
return "lbset must be between 0 and 99";
- worker->lbset = ival;
+ worker->s->lbset = ival;
}
else if (!strcasecmp(key, "connectiontimeout")) {
/* Request timeout in given unit (default is second).
return "Connectiontimeout has wrong format";
if (timeout < 1000)
return "Connectiontimeout must be at least one millisecond.";
- worker->conn_timeout = timeout;
- worker->conn_timeout_set = 1;
+ worker->s->conn_timeout = timeout;
+ worker->s->conn_timeout_set = 1;
}
else if (!strcasecmp(key, "flusher")) {
- worker->flusher = apr_pstrdup(p, val);
+ if (strlen(val) >= PROXY_WORKER_MAX_SCHEME_SIZE)
+ return "flusher name length must be < 16 characters";
+ PROXY_STRNCPY(worker->s->flusher, val);
}
else {
return "unknown Worker parameter";
arr = apr_table_elts(params);
elts = (const apr_table_entry_t *)arr->elts;
/* Distinguish the balancer from worker */
- if (strncasecmp(r, "balancer:", 9) == 0) {
+ if (ap_proxy_valid_balancer_name(r)) {
proxy_balancer *balancer = ap_proxy_get_balancer(cmd->pool, conf, r);
if (!balancer) {
- const char *err = ap_proxy_add_balancer(&balancer,
+ const char *err = ap_proxy_define_balancer(&balancer,
cmd->pool,
conf, r);
if (err)
/* Try to find the balancer */
balancer = ap_proxy_get_balancer(cmd->temp_pool, conf, path);
if (!balancer) {
- const char *err = ap_proxy_add_balancer(&balancer,
+ const char *err = ap_proxy_define_balancer(&balancer,
cmd->pool,
conf, path);
if (err)
name = ap_getword_conf(cmd->temp_pool, &arg);
}
- if (strncasecmp(name, "balancer:", 9) == 0) {
+ if (ap_proxy_valid_balancer_name(name)) {
balancer = ap_proxy_get_balancer(cmd->pool, conf, name);
if (!balancer) {
if (in_proxy_section) {
- err = ap_proxy_add_balancer(&balancer,
+ err = ap_proxy_define_balancer(&balancer,
cmd->pool,
conf, name);
if (err)
return apr_pstrcat(cmd->pool, thiscmd->name,
"> arguments are not supported for non url.",
NULL);
- if (strncasecmp(conf->p, "balancer:", 9) == 0) {
+ if (ap_proxy_valid_balancer_name(conf->p)) {
balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
if (!balancer) {
- err = ap_proxy_add_balancer(&balancer,
+ err = ap_proxy_define_balancer(&balancer,
cmd->pool,
sconf, conf->p);
if (err)
#include "util_filter.h"
#include "util_ebcdic.h"
#include "ap_provider.h"
+#include "ap_slotmem.h"
#if APR_HAVE_NETINET_IN_H
#include <netinet/in.h>
enc_path, enc_search, enc_user, enc_fpath, enc_parm
};
+#define BALANCER_PREFIX "balancer://"
+
#if APR_CHARSET_EBCDIC
#define CRLF "\r\n"
#else /*APR_CHARSET_EBCDIC*/
int close:1; /* Close 'this' connection */
int need_flush:1; /* Flag to decide whether we need to flush the
* filter chain or not */
-#if APR_HAS_THREADS
int inreslist:1; /* connection in apr_reslist? */
-#endif
} proxy_conn_rec;
typedef struct {
struct proxy_conn_pool {
apr_pool_t *pool; /* The pool used in constructor and destructor calls */
apr_sockaddr_t *addr; /* Preparsed remote address info */
-#if APR_HAS_THREADS
apr_reslist_t *res; /* Connection resource list */
-#endif
proxy_conn_rec *conn; /* Single connection for prefork mpm */
};
#define PROXY_WORKER_MAX_ROUTE_SIZE 64
#define PROXY_WORKER_MAX_NAME_SIZE 96
+#define PROXY_STRNCPY(dst, src) apr_cpystrn((dst), (src), sizeof(dst))
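A brief sketch of the pitfall this macro carries (my own illustration, not part of the patch): since it expands to sizeof(dst), it only behaves as intended when dst is a genuine char array such as the fixed-size fields of proxy_worker_shared; a char * destination would copy at most sizeof(char *) bytes.

    #include "mod_proxy.h"   /* PROXY_STRNCPY, PROXY_WORKER_MAX_ROUTE_SIZE */

    struct shared_example {
        char route[PROXY_WORKER_MAX_ROUTE_SIZE];   /* real array: sizeof(dst) == 64, safe */
        char *alias;                               /* pointer: sizeof(dst) == 4 or 8, NOT safe */
    };

    static void set_route(struct shared_example *s, const char *val)
    {
        PROXY_STRNCPY(s->route, val);   /* copies at most 63 chars and always NUL-terminates */
        /* PROXY_STRNCPY(s->alias, val) would silently truncate to pointer size */
    }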
/* Runtime worker status informations. Shared in scoreboard */
typedef struct {
int hmax; /* Hard maximum on the total number of connections */
int flush_wait; /* poll wait time in microseconds if flush_auto */
int index; /* shm array index */
- unsigned int apr_hash; /* hash #0 of worker name */
- unsigned int our_hash; /* hash #1 of worker name. Why 2? hash collisions. */
+ unsigned int hash; /* hash of worker name */
enum {
flush_off,
flush_on,
/* Worker configuration */
struct proxy_worker {
+ int index; /* shm array index */
+ unsigned int hash; /* hash of worker name */
proxy_conn_pool *cp; /* Connection pool to use */
proxy_worker_shared *s; /* Shared data */
+ proxy_balancer *balancer; /* which balancer am I in? */
void *context; /* general purpose storage */
-#if APR_HAS_THREADS
apr_thread_mutex_t *mutex; /* Thread lock for updating address cache */
-#endif
};
/*
#define PROXY_FLUSH_WAIT 10000
struct proxy_balancer {
- apr_array_header_t *cw; /* initially configured workers */
- proxy_worker **workers; /* array of proxy_workers - runtime*/
- int max_workers; /* maximum number of allowed workers */
- const char *name; /* name of the load balancer */
- apr_interval_time_t timeout; /* Timeout for waiting on free connection */
- const char *lbprovider; /* name of the lbmethod provider to use */
+ apr_array_header_t *workers; /* initially configured workers */
+ ap_slotmem_instance_t *slot; /* worker shm data - runtime */
+ int growth; /* number of workers that can be added post-config */
+ int max_workers; /* maximum number of allowed workers */
+ const char *name; /* name of the load balancer */
+ apr_interval_time_t timeout; /* Timeout for waiting on free connection */
+ const char *lbprovider; /* name of the lbmethod provider to use */
proxy_balancer_method *lbmethod;
const char *sticky_path; /* URL sticky session identifier */
apr_status_t (*updatelbstatus)(proxy_balancer *balancer, proxy_worker *elected, server_rec *s);
};
-#if APR_HAS_THREADS
#define PROXY_THREAD_LOCK(x) apr_thread_mutex_lock((x)->mutex)
#define PROXY_THREAD_UNLOCK(x) apr_thread_mutex_unlock((x)->mutex)
-#else
-#define PROXY_THREAD_LOCK(x) APR_SUCCESS
-#define PROXY_THREAD_UNLOCK(x) APR_SUCCESS
-#endif
#define PROXY_GLOBAL_LOCK(x) apr_global_mutex_lock((x)->mutex)
#define PROXY_GLOBAL_UNLOCK(x) apr_global_mutex_unlock((x)->mutex)
/* Connection pool API */
/**
* Get the worker from proxy configuration
- * @param p memory pool used for finding worker
- * @param conf current proxy server configuration
- * @param url url to find the worker from
- * @return proxy_worker or NULL if not found
+ * @param p memory pool used for finding worker
+ * @param conf current proxy server configuration
+ * @param balancer the balancer that the worker belongs to
+ * @param url url to find the worker from
+ * @return proxy_worker or NULL if not found
*/
PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
proxy_server_conf *conf,
+ proxy_balancer *balancer,
const char *url);
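A usage note on the new parameter, shown as a short sketch (request-time variable names assumed): passing NULL searches all of conf->workers, while passing a balancer restricts the longest-match search to that balancer's members, as the rewritten ap_proxy_get_worker() implements later in this patch.

    proxy_worker *w;

    /* search every configured worker (previous behaviour) */
    w = ap_proxy_get_worker(r->pool, conf, NULL, url);

    /* search only the members of one balancer */
    w = ap_proxy_get_worker(r->pool, conf, balancer, url);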
/**
- * Add the worker to proxy configuration
- * @param worker the new worker
- * @param p memory pool to allocate worker from
- * @param conf current proxy server configuration
- * @param url url containing worker name
- * @param id slotnumber id or -1 for auto allocation
- * @return error message or NULL if successful
- */
-PROXY_DECLARE(const char *) ap_proxy_add_worker_wid(proxy_worker **worker,
- apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url,
- int id);
-
-/**
- * Add the worker to proxy configuration
- * @param worker the new worker
- * @param p memory pool to allocate worker from
- * @param conf current proxy server configuration
- * @param url url containing worker name
- * @return error message or NULL if successful
- */
-PROXY_DECLARE(const char *) ap_proxy_add_worker(proxy_worker **worker,
- apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url);
+ * Define and allocate space for a worker in the proxy configuration
+ * @param p memory pool to allocate worker from
+ * @param worker the new worker
+ * @param balancer the balancer that the worker belongs to
+ * @param conf current proxy server configuration
+ * @param url url containing worker name
+ * @return error message or NULL if successful (*worker is new worker)
+ */
+PROXY_DECLARE(const char *) ap_proxy_define_worker(apr_pool_t *p,
+ proxy_worker **worker,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url);
/**
* Create new worker
proxy_worker *worker,
server_rec *s);
-
/**
* Initialize the worker
* @param worker worker to initialize
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker,
server_rec *s,
apr_pool_t *p);
+/**
+ * Verify that a name is a valid balancer name (e.g. balancer://foo)
+ * @param name name to test
+ * @return pointer to the name with BALANCER_PREFIX stripped, or NULL if not valid
+ */
+PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(const char *name);
+
+
/**
* Get the balancer from proxy configuration
* @param p memory pool used for temporary storage while finding balancer
PROXY_DECLARE(proxy_balancer *) ap_proxy_get_balancer(apr_pool_t *p,
proxy_server_conf *conf,
const char *url);
+
/**
- * Add the balancer to proxy configuration
- * @param balancer the new balancer
+ * Define and allocate space for a balancer in the proxy configuration
* @param p memory pool to allocate balancer from
+ * @param balancer the new balancer
* @param conf current proxy server configuration
* @param url url containing balancer name
* @return error message or NULL if successful
*/
-PROXY_DECLARE(const char *) ap_proxy_add_balancer(proxy_balancer **balancer,
- apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url);
+PROXY_DECLARE(const char *) ap_proxy_define_balancer(proxy_balancer **balancer,
+ apr_pool_t *p,
+ proxy_server_conf *conf,
+ const char *url);
-/**
- * Add the worker to the balancer
- * @param pool memory pool for adding worker
- * @param balancer balancer to add to
- * @param worker worker to add
- * @param id slotnumber id or -1 for auto allocation
- * @note A single worker can be added to multiple balancers.
- */
-PROXY_DECLARE(void) ap_proxy_add_worker_to_balancer_wid(apr_pool_t *pool,
- proxy_balancer *balancer,
- proxy_worker *worker,
- int id);
-/**
- * Add the worker to the balancer
- * @param pool memory pool for adding worker
- * @param balancer balancer to add to
- * @param worker worker to add
- * @note A single worker can be added to multiple balancers.
- */
-PROXY_DECLARE(void) ap_proxy_add_worker_to_balancer(apr_pool_t *pool,
- proxy_balancer *balancer,
- proxy_worker *worker);
/**
* Get the most suitable worker and/or balancer for the request
* @param worker worker used for processing request
ap_get_module_config(r->server->module_config, &proxy_module);
proxy_balancer *balancer;
const char *real = ent[i].real;
+ const char *bname;
/*
* First check if mapping against a balancer and see
* if we have such a entity. If so, then we need to
* or may not be the right one... basically, we need
* to find which member actually handled this request.
*/
- if ((strncasecmp(real, "balancer://", 11) == 0) &&
- (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
+ bname = ap_proxy_valid_balancer_name(real);
+ if (bname && (balancer = ap_proxy_get_balancer(r->pool, sconf, real))) {
int n, l3 = 0;
proxy_worker **worker = (proxy_worker **)balancer->workers->elts;
- const char *urlpart = ap_strchr_c(real + 11, '/');
+ const char *urlpart = ap_strchr_c(bname, '/');
if (urlpart) {
if (!urlpart[1])
urlpart = NULL;
return ret;
}
+/*
+ * BALANCER related...
+ */
+
+/*
+ * verifies that the balancer name conforms to standards. If
+ * so, returns ptr to the actual name (BALANCER_PREFIX removed),
+ * otherwise NULL
+ */
+PROXY_DECLARE(char *) ap_proxy_valid_balancer_name(const char *name)
+{
+ if (strncasecmp(name, BALANCER_PREFIX, sizeof(BALANCER_PREFIX) - 1) == 0)
+ return (char *)(name + sizeof(BALANCER_PREFIX) - 1);
+ else
+ return NULL;
+}
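A small illustration of the expected behaviour (my own sketch, not taken from the patch or a test suite):

    #include "mod_proxy.h"

    /* "balancer://mycluster/app" -> "mycluster/app"
     * "BALANCER://mycluster"     -> "mycluster"        (prefix match is case-insensitive)
     * "http://backend"           -> NULL
     */
    static const char *balancer_part(const char *url)
    {
        char *name = ap_proxy_valid_balancer_name(url);
        return name ? name : "(not a balancer URL)";
    }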
+
+
PROXY_DECLARE(proxy_balancer *) ap_proxy_get_balancer(apr_pool_t *p,
proxy_server_conf *conf,
const char *url)
{
proxy_balancer *balancer;
- char *c, *uri = apr_pstrdup(p, url);
+ char *name, *q, *uri = apr_pstrdup(p, url);
int i;
- c = strchr(uri, ':');
- if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
+ if (!(name = ap_proxy_valid_balancer_name(uri)))
return NULL;
- }
+
/* remove path from uri */
- if ((c = strchr(c + 3, '/'))) {
- *c = '\0';
- }
+ if ((q = strchr(name, '/')))
+ *q = '\0';
+
balancer = (proxy_balancer *)conf->balancers->elts;
for (i = 0; i < conf->balancers->nelts; i++) {
- if (strcasecmp(balancer->name, uri) == 0) {
+ if (strcasecmp(balancer->name, name) == 0) {
return balancer;
}
balancer++;
return NULL;
}
-PROXY_DECLARE(const char *) ap_proxy_add_balancer(proxy_balancer **balancer,
+PROXY_DECLARE(const char *) ap_proxy_define_balancer(proxy_balancer **balancer,
apr_pool_t *p,
proxy_server_conf *conf,
const char *url)
{
- char *c, *q, *uri = apr_pstrdup(p, url);
+ char *name, *q, *uri = apr_pstrdup(p, url);
proxy_balancer_method *lbmethod;
- c = strchr(uri, ':');
- if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0')
- return "Bad syntax for a balancer name";
+ /* We should never get here without a valid BALANCER_PREFIX... */
+
+ if (!(name = ap_proxy_valid_balancer_name(uri)))
+ return "Bad syntax for a balancer name";
+
/* remove path from uri */
- if ((q = strchr(c + 3, '/')))
+ if ((q = strchr(name, '/')))
*q = '\0';
-
- ap_str_tolower(uri);
+
+ ap_str_tolower(name);
*balancer = apr_array_push(conf->balancers);
memset(*balancer, 0, sizeof(proxy_balancer));
return "Can't find 'byrequests' lb method";
}
- (*balancer)->name = uri;
+ (*balancer)->name = name;
(*balancer)->lbmethod = lbmethod;
(*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker *));
(*balancer)->updated = apr_time_now();
- /* XXX Is this a right place to create mutex */
-#if APR_HAS_THREADS
- if (apr_thread_mutex_create(&((*balancer)->mutex),
- APR_THREAD_MUTEX_DEFAULT, p) != APR_SUCCESS) {
- /* XXX: Do we need to log something here */
- return "can not create thread mutex";
- }
-#endif
return NULL;
}
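The intended call pattern, modelled on the ProxyPass handling earlier in this patch, is look-up first, define only on a miss. A hedged sketch (the helper name is mine; the real call sites pass the directive argument as r or path):

    #include "mod_proxy.h"

    static const char *ensure_balancer(cmd_parms *cmd, proxy_server_conf *conf,
                                       const char *url, proxy_balancer **out)
    {
        proxy_balancer *balancer = ap_proxy_get_balancer(cmd->pool, conf, url);
        if (!balancer) {
            const char *err = ap_proxy_define_balancer(&balancer, cmd->pool, conf, url);
            if (err)
                return err;          /* e.g. "Bad syntax for a balancer name" */
        }
        *out = balancer;
        return NULL;
    }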
-PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url)
+#if 0
+/*
+ * Create an already defined balancer and free up memory.
+ * Placeholder for when we make +/- of balancers runtime as well
+ */
+PROXY_DECLARE(void) ap_proxy_create_balancer(TODO)
{
- proxy_worker *worker;
- proxy_worker *max_worker = NULL;
- int max_match = 0;
- int url_length;
- int min_match;
- int worker_name_length;
- const char *c;
- char *url_copy;
- int i;
-
- c = ap_strchr_c(url, ':');
- if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
- return NULL;
- }
-
- url_copy = apr_pstrdup(p, url);
- url_length = strlen(url);
+}
- /*
- * We need to find the start of the path and
- * therefore we know the length of the scheme://hostname/
- * part to we can force-lowercase everything up to
- * the start of the path.
- */
- c = ap_strchr_c(c+3, '/');
- if (c) {
- char *pathstart;
- pathstart = url_copy + (c - url);
- *pathstart = '\0';
- ap_str_tolower(url_copy);
- min_match = strlen(url_copy);
- *pathstart = '/';
- }
- else {
- ap_str_tolower(url_copy);
- min_match = strlen(url_copy);
- }
+PROXY_DECLARE(void) ap_proxy_initialize_balancer(TODO)
+{
+}
- worker = (proxy_worker *)conf->workers->elts;
+#endif
- /*
- * Do a "longest match" on the worker name to find the worker that
- * fits best to the URL, but keep in mind that we must have at least
- * a minimum matching of length min_match such that
- * scheme://hostname[:port] matches between worker and url.
- */
- for (i = 0; i < conf->workers->nelts; i++) {
- if ( ((worker_name_length = strlen(worker->name)) <= url_length)
- && (worker_name_length >= min_match)
- && (worker_name_length > max_match)
- && (strncmp(url_copy, worker->name, worker_name_length) == 0) ) {
- max_worker = worker;
- max_match = worker_name_length;
- }
- worker++;
- }
- return max_worker;
-}
+/*
+ * CONNECTION related...
+ */
-#if APR_HAS_THREADS
static apr_status_t conn_pool_cleanup(void *theworker)
{
proxy_worker *worker = (proxy_worker *)theworker;
}
return APR_SUCCESS;
}
-#endif
static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
{
* This pool is used for connection recycling.
* Once the worker is added it is never removed but
* it can be disabled.
- */
- apr_pool_create(&pool, p);
- apr_pool_tag(pool, "proxy_worker_cp");
- /*
- * Alloc from the same pool as worker.
- * proxy_conn_pool is permanently attached to the worker.
- */
- cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
- cp->pool = pool;
- worker->cp = cp;
-}
-
-PROXY_DECLARE(const char *) ap_proxy_add_worker_wid(proxy_worker **worker,
- apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url,
- int id)
-{
- int rv;
- apr_uri_t uri;
-
- rv = apr_uri_parse(p, url, &uri);
-
- if (rv != APR_SUCCESS) {
- return "Unable to parse URL";
- }
- if (!uri.hostname || !uri.scheme) {
- return "URL must be absolute!";
- }
-
- ap_str_tolower(uri.hostname);
- ap_str_tolower(uri.scheme);
- *worker = apr_array_push(conf->workers);
- memset(*worker, 0, sizeof(proxy_worker));
- (*worker)->name = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD);
- (*worker)->scheme = uri.scheme;
- (*worker)->hostname = uri.hostname;
- (*worker)->port = uri.port;
- if (id < 0) {
- (*worker)->id = proxy_lb_workers;
- /* Increase the total worker count */
- proxy_lb_workers++;
- } else {
- (*worker)->id = id;
- }
- (*worker)->flush_packets = flush_off;
- (*worker)->flush_wait = PROXY_FLUSH_WAIT;
- (*worker)->smax = -1;
- (*worker)->our_hash = ap_proxy_hashfunc((*worker)->name, PROXY_HASHFUNC_DEFAULT);
- (*worker)->apr_hash = ap_proxy_hashfunc((*worker)->name, PROXY_HASHFUNC_APR);
- (*worker)->cp = NULL;
- (*worker)->mutex = NULL;
-
- return NULL;
-}
-
-PROXY_DECLARE(const char *) ap_proxy_add_worker(proxy_worker **worker,
- apr_pool_t *p,
- proxy_server_conf *conf,
- const char *url)
-{
- return ap_proxy_add_worker_wid(worker, p, conf, url, -1);
-}
-
-PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker_wid(apr_pool_t *p, int id)
-{
-
- proxy_worker *worker;
- worker = (proxy_worker *)apr_pcalloc(p, sizeof(proxy_worker));
- if (id < 0) {
- worker->id = proxy_lb_workers;
- /* Increase the total worker count */
- proxy_lb_workers++;
- } else {
- worker->id = id;
- }
- worker->smax = -1;
- worker->cp = NULL;
- worker->mutex = NULL;
-
- return worker;
-}
-
-PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker(apr_pool_t *p)
-{
- return ap_proxy_create_worker_wid(p, -1);
-}
-
-PROXY_DECLARE(void)
-ap_proxy_add_worker_to_balancer_wid(apr_pool_t *pool, proxy_balancer *balancer,
- proxy_worker *worker, int id)
-{
- proxy_worker **runtime;
-
- runtime = apr_array_push(balancer->workers);
- *runtime = worker;
- if (id < 0) {
- (*runtime)->id = proxy_lb_workers;
- /* Increase the total runtime count */
- proxy_lb_workers++;
- } else {
- (*runtime)->id = id;
- }
- balancer->updated = apr_time_now();
-
-}
-
-PROXY_DECLARE(void)
-ap_proxy_add_worker_to_balancer(apr_pool_t *pool, proxy_balancer *balancer,
- proxy_worker *worker)
-{
- ap_proxy_add_worker_to_balancer_wid(pool, balancer, worker, -1);
-}
-
-PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
- proxy_balancer **balancer,
- request_rec *r,
- proxy_server_conf *conf, char **url)
-{
- int access_status;
-
- access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
- if (access_status == DECLINED && *balancer == NULL) {
- *worker = ap_proxy_get_worker(r->pool, conf, *url);
- if (*worker) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "proxy: %s: found worker %s for %s",
- (*worker)->scheme, (*worker)->name, *url);
-
- *balancer = NULL;
- access_status = OK;
- }
- else if (r->proxyreq == PROXYREQ_PROXY) {
- if (conf->forward) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "proxy: *: found forward proxy worker for %s",
- *url);
- *balancer = NULL;
- *worker = conf->forward;
- access_status = OK;
- /*
- * The forward worker does not keep connections alive, so
- * ensure that mod_proxy_http does the correct thing
- * regarding the Connection header in the request.
- */
- apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
- }
- }
- else if (r->proxyreq == PROXYREQ_REVERSE) {
- if (conf->reverse) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "proxy: *: found reverse proxy worker for %s",
- *url);
- *balancer = NULL;
- *worker = conf->reverse;
- access_status = OK;
- /*
- * The reverse worker does not keep connections alive, so
- * ensure that mod_proxy_http does the correct thing
- * regarding the Connection header in the request.
- */
- apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
- }
- }
- }
- else if (access_status == DECLINED && *balancer != NULL) {
- /* All the workers are busy */
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "proxy: all workers are busy. Unable to serve %s",
- *url);
- access_status = HTTP_SERVICE_UNAVAILABLE;
- }
- return access_status;
-}
-
-PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
- proxy_balancer *balancer,
- request_rec *r,
- proxy_server_conf *conf)
-{
- int access_status = OK;
- if (balancer) {
- access_status = proxy_run_post_request(worker, balancer, r, conf);
- if (access_status == DECLINED) {
- access_status = OK; /* no post_request handler available */
- /* TODO: recycle direct worker */
- }
- }
-
- return access_status;
-}
-
-/* DEPRECATED */
-PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **newsock,
- const char *proxy_function,
- apr_sockaddr_t *backend_addr,
- const char *backend_name,
- proxy_server_conf *conf,
- request_rec *r)
-{
- apr_status_t rv;
- int connected = 0;
- int loglevel;
-
- while (backend_addr && !connected) {
- if ((rv = apr_socket_create(newsock, backend_addr->family,
- SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
- loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
- ap_log_rerror(APLOG_MARK, loglevel, rv, r,
- "proxy: %s: error creating fam %d socket for target %s",
- proxy_function,
- backend_addr->family,
- backend_name);
- /*
- * this could be an IPv6 address from the DNS but the
- * local machine won't give us an IPv6 socket; hopefully the
- * DNS returned an additional address to try
- */
- backend_addr = backend_addr->next;
- continue;
- }
-
- if (conf->recv_buffer_size > 0 &&
- (rv = apr_socket_opt_set(*newsock, APR_SO_RCVBUF,
- conf->recv_buffer_size))) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- "apr_socket_opt_set(SO_RCVBUF): Failed to set "
- "ProxyReceiveBufferSize, using default");
- }
-
- rv = apr_socket_opt_set(*newsock, APR_TCP_NODELAY, 1);
- if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- "apr_socket_opt_set(APR_TCP_NODELAY): "
- "Failed to set");
- }
-
- /* Set a timeout on the socket */
- if (conf->timeout_set) {
- apr_socket_timeout_set(*newsock, conf->timeout);
- }
- else {
- apr_socket_timeout_set(*newsock, r->server->timeout);
- }
-
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "proxy: %s: fam %d socket created to connect to %s",
- proxy_function, backend_addr->family, backend_name);
-
- if (conf->source_address) {
- rv = apr_socket_bind(*newsock, conf->source_address);
- if (rv != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- "proxy: %s: failed to bind socket to local address",
- proxy_function);
- }
- }
-
- /* make the connection out of the socket */
- rv = apr_socket_connect(*newsock, backend_addr);
-
- /* if an error occurred, loop round and try again */
- if (rv != APR_SUCCESS) {
- apr_socket_close(*newsock);
- loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
- ap_log_rerror(APLOG_MARK, loglevel, rv, r,
- "proxy: %s: attempt to connect to %pI (%s) failed",
- proxy_function,
- backend_addr,
- backend_name);
- backend_addr = backend_addr->next;
- continue;
- }
- connected = 1;
- }
- return connected ? 0 : 1;
+ */
+ apr_pool_create(&pool, p);
+ apr_pool_tag(pool, "proxy_worker_cp");
+ /*
+ * Alloc from the same pool as worker.
+ * proxy_conn_pool is permanently attached to the worker.
+ */
+ cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
+ cp->pool = pool;
+ worker->cp = cp;
}
static apr_status_t connection_cleanup(void *theconn)
conn->r = NULL;
}
-#if APR_HAS_THREADS
/* Sanity check: Did we already return the pooled connection? */
if (conn->inreslist) {
ap_log_perror(APLOG_MARK, APLOG_ERR, 0, conn->pool,
worker->name);
return APR_SUCCESS;
}
-#endif
/* determine if the connection need to be closed */
- if (conn->close || !worker->is_address_reusable || worker->disablereuse) {
+ if (conn->close || !worker->s->is_address_reusable || worker->s->disablereuse) {
apr_pool_t *p = conn->pool;
apr_pool_clear(p);
conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
apr_pool_create(&(conn->scpool), p);
apr_pool_tag(conn->scpool, "proxy_conn_scpool");
}
-#if APR_HAS_THREADS
+
if (worker->hmax && worker->cp->res) {
conn->inreslist = 1;
apr_reslist_release(worker->cp->res, (void *)conn);
}
else
-#endif
{
worker->cp->conn = conn;
}
conn->pool = ctx;
conn->scpool = scpool;
conn->worker = worker;
-#if APR_HAS_THREADS
conn->inreslist = 1;
-#endif
*resource = conn;
return APR_SUCCESS;
}
-#if APR_HAS_THREADS /* only needed when threads are used */
/* reslist destructor */
static apr_status_t connection_destructor(void *resource, void *params,
apr_pool_t *pool)
return APR_SUCCESS;
}
-#endif
/*
- * ap_proxy_initialize_worker_share() concerns itself
- * with initializing those parts of worker which
- * are, or could be, shared. Basically worker->s
+ * WORKER related...
*/
-PROXY_DECLARE(void) ap_proxy_initialize_worker_share(proxy_server_conf *conf,
- proxy_worker *worker,
- server_rec *s)
-{
- proxy_worker_shared *score = NULL;
- if (PROXY_WORKER_IS_INITIALIZED(worker)) {
- /* The worker share is already initialized */
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
- "proxy: worker %s already initialized",
- worker->name);
- return;
- }
- if (!worker->s) {
- /* Get scoreboard slot */
- if (ap_scoreboard_image) {
- score = (proxy_worker_shared *) ap_get_scoreboard_lb(worker->id);
- if (!score) {
- ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
- "proxy: ap_get_scoreboard_lb(%d) failed in child %" APR_PID_T_FMT " for worker %s",
- worker->id, getpid(), worker->name);
- }
- else {
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
- "proxy: grabbed scoreboard slot %d in child %" APR_PID_T_FMT " for worker %s",
- worker->id, getpid(), worker->name);
- }
- }
- if (!score) {
- score = (proxy_worker_shared *) apr_pcalloc(conf->pool, sizeof(proxy_worker_shared));
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
- "proxy: initialized plain memory in child %" APR_PID_T_FMT " for worker %s",
- getpid(), worker->name);
- }
- worker->s = score;
- /*
- * recheck to see if we've already been here. Possible
- * if proxy is using scoreboard to hold shared stats
- */
- if (PROXY_WORKER_IS_INITIALIZED(worker)) {
- /* The worker share is already initialized */
- ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s,
- "proxy: worker %s already initialized",
- worker->name);
- return;
- }
+PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
+ proxy_server_conf *conf,
+ proxy_balancer *balancer,
+ const char *url)
+{
+ proxy_worker *worker;
+ proxy_worker *max_worker = NULL;
+ int max_match = 0;
+ int url_length;
+ int min_match;
+ int worker_name_length;
+ const char *c;
+ char *url_copy;
+ int i, end;
+
+ c = ap_strchr_c(url, ':');
+ if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
+ return NULL;
}
- if (worker->route) {
- strcpy(worker->s->route, worker->route);
+
+ url_copy = apr_pstrdup(p, url);
+ url_length = strlen(url);
+
+ /*
+ * We need to find the start of the path and
+ * therefore we know the length of the scheme://hostname/
+ * part to we can force-lowercase everything up to
+ * the start of the path.
+ */
+ c = ap_strchr_c(c+3, '/');
+ if (c) {
+ char *pathstart;
+ pathstart = url_copy + (c - url);
+ *pathstart = '\0';
+ ap_str_tolower(url_copy);
+ min_match = strlen(url_copy);
+ *pathstart = '/';
}
else {
- *worker->s->route = '\0';
+ ap_str_tolower(url_copy);
+ min_match = strlen(url_copy);
}
- if (worker->redirect) {
- strcpy(worker->s->redirect, worker->redirect);
+
+ if (balancer) {
+ worker = (proxy_worker *)balancer->workers->elts;
+ end = balancer->workers->nelts;
+ } else {
+ worker = (proxy_worker *)conf->workers->elts;
+ end = conf->workers->nelts;
}
- else {
- *worker->s->redirect = '\0';
+
+ /*
+ * Do a "longest match" on the worker name to find the worker that
+ * fits best to the URL, but keep in mind that we must have at least
+ * a minimum matching of length min_match such that
+ * scheme://hostname[:port] matches between worker and url.
+ */
+ for (i = 0; i < end; i++) {
+ if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+ && (worker_name_length >= min_match)
+ && (worker_name_length > max_match)
+ && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+ max_worker = worker;
+ max_match = worker_name_length;
+ }
+ worker++;
}
+ return max_worker;
+}
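A worked example of the longest-match rule above (illustrative names of mine): with workers named http://backend.example.com/ and http://backend.example.com/app/ both configured, a lookup for http://backend.example.com/app/x returns the second one, since both names are prefixes of the URL and at least scheme://hostname long, but the second is longer.

    /* hypothetical request-time lookup, searching all configured workers */
    proxy_worker *w = ap_proxy_get_worker(r->pool, conf, NULL,
                                          "http://backend.example.com/app/x");
    /* w->s->name is now "http://backend.example.com/app/", the longest matching prefix */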
- worker->s->status |= (worker->status | PROXY_WORKER_INITIALIZED);
- worker->s->apr_hash = worker->apr_hash;
- worker->s->our_hash = worker->our_hash;
+/*
+ * To create a worker from scratch first we define the
+ * specifics of the worker; this is all local data.
+ * We then allocate space for it if data needs to be
+ * shared. This allows for dynamic addition during
+ * config and runtime.
+ */
+PROXY_DECLARE(const char *) ap_proxy_define_worker(apr_pool_t *p,
+ proxy_worker **worker,
+ proxy_balancer *balancer,
+ proxy_server_conf *conf,
+ const char *url)
+{
+ int rv;
+ apr_uri_t uri;
+ proxy_worker_shared *wstatus;
+
+ rv = apr_uri_parse(p, url, &uri);
+
+ if (rv != APR_SUCCESS) {
+ return "Unable to parse URL";
+ }
+ if (!uri.hostname || !uri.scheme) {
+ return "URL must be absolute!";
+ }
+
+ ap_str_tolower(uri.hostname);
+ ap_str_tolower(uri.scheme);
+ /*
+ * Workers can be associated w/ balancers or on their
+ * own; ie: the generic reverse-proxy or a worker
+ * in a simple ProxyPass statement. eg:
+ *
+ * ProxyPass / http://www.example.com
+ *
+ * in which case the worker goes in the conf slot.
+ */
+ if (balancer)
+ *worker = apr_array_push(balancer->workers);
+ else
+ *worker = apr_array_push(conf->workers);
+ memset(*worker, 0, sizeof(proxy_worker));
+ /* right here we just want to tuck away the worker info.
+ * if called during config, we don't have shm setup yet,
+ * so just note the info for later. */
+ wstatus = malloc(sizeof(proxy_worker_shared)); /* will be freed in ap_proxy_create_worker */
+ memset(wstatus, 0, sizeof(proxy_worker_shared));
+
+ PROXY_STRNCPY(wstatus->name, apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD));
+ PROXY_STRNCPY(wstatus->scheme, uri.scheme);
+ PROXY_STRNCPY(wstatus->hostname, uri.hostname);
+ wstatus->port = uri.port;
+ wstatus->flush_packets = flush_off;
+ wstatus->flush_wait = PROXY_FLUSH_WAIT;
+ wstatus->smax = -1;
+ wstatus->hash = ap_proxy_hashfunc(wstatus->name, PROXY_HASHFUNC_DEFAULT);
+
+ (*worker)->hash = wstatus->hash;
+ (*worker)->cp = NULL;
+ (*worker)->mutex = NULL;
+ (*worker)->balancer = balancer;
+ (*worker)->s = wstatus;
+
+ return NULL;
+}
+/*
+ * Create an already defined worker and free up memory
+ */
+PROXY_DECLARE(void) ap_proxy_create_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
+{
+ memcpy(shm, worker->s, sizeof(proxy_worker_shared));
+ free(worker->s); /* was malloced in ap_proxy_define_worker */
+ worker->s = shm;
+ worker->s->index = i;
}
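Putting the two halves together, a hedged sketch of the new worker life cycle: ap_proxy_define_worker() records everything in a malloc'ed local proxy_worker_shared at configuration time, and ap_proxy_create_worker() later copies that record into the real shared-memory slot. The helper name and URL are mine, and how the slot is obtained (presumably from the balancer's slotmem instance) is outside this hunk.

    #include "mod_proxy.h"

    static const char *define_then_create(apr_pool_t *p, proxy_server_conf *conf,
                                          proxy_worker_shared *shm_slot, int index,
                                          proxy_worker **out)
    {
        const char *err = ap_proxy_define_worker(p, out, NULL, conf,
                                                 "http://backend.example.com");
        if (err)
            return err;                                /* parse/validation failure */
        ap_proxy_create_worker(*out, shm_slot, index); /* copy into shm, free the local copy */
        return NULL;
    }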
PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)
{
apr_status_t rv;
-
-#if APR_HAS_THREADS
int mpm_threads;
-#endif
- if (worker->status & PROXY_WORKER_INITIALIZED) {
+ if (worker->s->status & PROXY_WORKER_INITIALIZED) {
/* The worker is already initialized */
return APR_SUCCESS;
}
/* Set default parameters */
- if (!worker->retry_set) {
- worker->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
+ if (!worker->s->retry_set) {
+ worker->s->retry = apr_time_from_sec(PROXY_WORKER_DEFAULT_RETRY);
}
/* By default address is reusable unless DisableReuse is set */
- if (worker->disablereuse) {
- worker->is_address_reusable = 0;
+ if (worker->s->disablereuse) {
+ worker->s->is_address_reusable = 0;
}
else {
- worker->is_address_reusable = 1;
+ worker->s->is_address_reusable = 1;
}
if (worker->cp == NULL)
return APR_EGENERAL;
}
-#if APR_HAS_THREADS
if (worker->mutex == NULL) {
rv = apr_thread_mutex_create(&(worker->mutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
ap_mpm_query(AP_MPMQ_MAX_THREADS, &mpm_threads);
if (mpm_threads > 1) {
/* Set hard max to no more then mpm_threads */
- if (worker->hmax == 0 || worker->hmax > mpm_threads) {
- worker->hmax = mpm_threads;
+ if (worker->s->hmax == 0 || worker->s->hmax > mpm_threads) {
+ worker->s->hmax = mpm_threads;
}
- if (worker->smax == -1 || worker->smax > worker->hmax) {
- worker->smax = worker->hmax;
+ if (worker->s->smax == -1 || worker->s->smax > worker->s->hmax) {
+ worker->s->smax = worker->s->hmax;
}
/* Set min to be lower then smax */
- if (worker->min > worker->smax) {
- worker->min = worker->smax;
+ if (worker->s->min > worker->s->smax) {
+ worker->s->min = worker->s->smax;
}
}
else {
/* This will suppress the apr_reslist creation */
- worker->min = worker->smax = worker->hmax = 0;
+ worker->s->min = worker->s->smax = worker->s->hmax = 0;
}
if (worker->hmax) {
rv = apr_reslist_create(&(worker->cp->res),
apr_pool_cleanup_null);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "proxy: initialized worker %d in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
- worker->id, getpid(), worker->hostname, worker->min,
- worker->hmax, worker->smax);
+ "proxy: initialized worker in child %" APR_PID_T_FMT " for (%s) min=%d max=%d smax=%d",
+ getpid(), worker->s->hostname, worker->s->min,
+ worker->s->hmax, worker->s->smax);
#if (APR_MAJOR_VERSION > 0)
/* Set the acquire timeout */
- if (rv == APR_SUCCESS && worker->acquire_set) {
- apr_reslist_timeout_set(worker->cp->res, worker->acquire);
+ if (rv == APR_SUCCESS && worker->s->acquire_set) {
+ apr_reslist_timeout_set(worker->cp->res, worker->s->acquire);
}
#endif
}
else
-#endif
{
void *conn;
worker->cp->conn = conn;
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
- "proxy: initialized single connection worker %d in child %" APR_PID_T_FMT " for (%s)",
- worker->id, getpid(), worker->hostname);
+ "proxy: initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
+ getpid(), worker->s->hostname);
}
if (rv == APR_SUCCESS) {
- worker->status |= (PROXY_WORKER_INITIALIZED);
+ worker->s->status |= (PROXY_WORKER_INITIALIZED);
}
return rv;
}
}
}
+PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
+ proxy_balancer **balancer,
+ request_rec *r,
+ proxy_server_conf *conf, char **url)
+{
+ int access_status;
+
+ access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
+ if (access_status == DECLINED && *balancer == NULL) {
+ *worker = ap_proxy_get_worker(r->pool, conf, NULL, *url);
+ if (*worker) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "proxy: %s: found worker %s for %s",
+ (*worker)->s->scheme, (*worker)->s->name, *url);
+
+ *balancer = NULL;
+ access_status = OK;
+ }
+ else if (r->proxyreq == PROXYREQ_PROXY) {
+ if (conf->forward) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "proxy: *: found forward proxy worker for %s",
+ *url);
+ *balancer = NULL;
+ *worker = conf->forward;
+ access_status = OK;
+ /*
+ * The forward worker does not keep connections alive, so
+ * ensure that mod_proxy_http does the correct thing
+ * regarding the Connection header in the request.
+ */
+ apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
+ }
+ }
+ else if (r->proxyreq == PROXYREQ_REVERSE) {
+ if (conf->reverse) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "proxy: *: found reverse proxy worker for %s",
+ *url);
+ *balancer = NULL;
+ *worker = conf->reverse;
+ access_status = OK;
+ /*
+ * The reverse worker does not keep connections alive, so
+ * ensure that mod_proxy_http does the correct thing
+ * regarding the Connection header in the request.
+ */
+ apr_table_set(r->subprocess_env, "proxy-nokeepalive", "1");
+ }
+ }
+ }
+ else if (access_status == DECLINED && *balancer != NULL) {
+ /* All the workers are busy */
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "proxy: all workers are busy. Unable to serve %s",
+ *url);
+ access_status = HTTP_SERVICE_UNAVAILABLE;
+ }
+ return access_status;
+}
+
+PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
+ proxy_balancer *balancer,
+ request_rec *r,
+ proxy_server_conf *conf)
+{
+ int access_status = OK;
+ if (balancer) {
+ access_status = proxy_run_post_request(worker, balancer, r, conf);
+ if (access_status == DECLINED) {
+ access_status = OK; /* no post_request handler available */
+ /* TODO: recycle direct worker */
+ }
+ }
+
+ return access_status;
+}
+
+/* DEPRECATED */
+PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **newsock,
+ const char *proxy_function,
+ apr_sockaddr_t *backend_addr,
+ const char *backend_name,
+ proxy_server_conf *conf,
+ request_rec *r)
+{
+ apr_status_t rv;
+ int connected = 0;
+ int loglevel;
+
+ while (backend_addr && !connected) {
+ if ((rv = apr_socket_create(newsock, backend_addr->family,
+ SOCK_STREAM, 0, r->pool)) != APR_SUCCESS) {
+ loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
+ ap_log_rerror(APLOG_MARK, loglevel, rv, r,
+ "proxy: %s: error creating fam %d socket for target %s",
+ proxy_function,
+ backend_addr->family,
+ backend_name);
+ /*
+ * this could be an IPv6 address from the DNS but the
+ * local machine won't give us an IPv6 socket; hopefully the
+ * DNS returned an additional address to try
+ */
+ backend_addr = backend_addr->next;
+ continue;
+ }
+
+ if (conf->recv_buffer_size > 0 &&
+ (rv = apr_socket_opt_set(*newsock, APR_SO_RCVBUF,
+ conf->recv_buffer_size))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_socket_opt_set(SO_RCVBUF): Failed to set "
+ "ProxyReceiveBufferSize, using default");
+ }
+
+ rv = apr_socket_opt_set(*newsock, APR_TCP_NODELAY, 1);
+ if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "apr_socket_opt_set(APR_TCP_NODELAY): "
+ "Failed to set");
+ }
+
+ /* Set a timeout on the socket */
+ if (conf->timeout_set) {
+ apr_socket_timeout_set(*newsock, conf->timeout);
+ }
+ else {
+ apr_socket_timeout_set(*newsock, r->server->timeout);
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "proxy: %s: fam %d socket created to connect to %s",
+ proxy_function, backend_addr->family, backend_name);
+
+ if (conf->source_address) {
+ rv = apr_socket_bind(*newsock, conf->source_address);
+ if (rv != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ "proxy: %s: failed to bind socket to local address",
+ proxy_function);
+ }
+ }
+
+ /* make the connection out of the socket */
+ rv = apr_socket_connect(*newsock, backend_addr);
+
+ /* if an error occurred, loop round and try again */
+ if (rv != APR_SUCCESS) {
+ apr_socket_close(*newsock);
+ loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
+ ap_log_rerror(APLOG_MARK, loglevel, rv, r,
+ "proxy: %s: attempt to connect to %pI (%s) failed",
+ proxy_function,
+ backend_addr,
+ backend_name);
+ backend_addr = backend_addr->next;
+ continue;
+ }
+ connected = 1;
+ }
+ return connected ? 0 : 1;
+}
+
PROXY_DECLARE(int) ap_proxy_acquire_connection(const char *proxy_function,
proxy_conn_rec **conn,
proxy_worker *worker,
return HTTP_SERVICE_UNAVAILABLE;
}
}
-#if APR_HAS_THREADS
+
if (worker->hmax && worker->cp->res) {
rv = apr_reslist_acquire(worker->cp->res, (void **)conn);
}
else
-#endif
{
/* create the new connection if the previous was destroyed */
if (!worker->cp->conn) {
(*conn)->worker = worker;
(*conn)->close = 0;
-#if APR_HAS_THREADS
(*conn)->inreslist = 0;
-#endif
return OK;
}