From 9096c04779eb82163bb99d8b53a1b9b6cefb9a6f Mon Sep 17 00:00:00 2001
From: Jim Jagielski
Date: Thu, 13 Jan 2011 15:59:02 +0000
Subject: [PATCH] Adjust for conf->workers being *proxy_worker and
 balancer->workers being **proxy_worker

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1058624 13f79535-47bb-0310-9956-ffa450edef68
---
 modules/proxy/mod_proxy.c          |  9 +++-
 modules/proxy/mod_proxy.h          | 14 +++---
 modules/proxy/mod_proxy_balancer.c | 73 ++++++++++++++++++++---------
 modules/proxy/proxy_util.c         | 75 +++++++++++++++++++-----------
 4 files changed, 113 insertions(+), 58 deletions(-)

diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
index 742c81f153..bd6c6aea62 100644
--- a/modules/proxy/mod_proxy.c
+++ b/modules/proxy/mod_proxy.c
@@ -1463,6 +1463,7 @@ static const char *
         const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r);
         if (err)
             return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
 
+        PROXY_COPY_CONF_PARAMS(worker, conf);
     } else {
         reuse = 1;
@@ -1880,8 +1881,14 @@ static const char *add_member(cmd_parms *cmd, void *dummy, const char *arg)
     /* Try to find existing worker */
     worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, name);
     if (!worker) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+                     "Defining worker '%s' for balancer '%s'",
+                     name, balancer->name);
         if ((err = ap_proxy_define_worker(cmd->pool, &worker, balancer, conf, name)) != NULL)
             return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+                     "Defined worker '%s' for balancer '%s'",
+                     worker->s->name, balancer->name);
         PROXY_COPY_CONF_PARAMS(worker, conf);
     } else {
         reuse = 1;
@@ -2089,7 +2096,7 @@ static const char *proxysection(cmd_parms *cmd, void *mconfig, const char *arg)
         return apr_pstrcat(cmd->pool, thiscmd->name,
                            "> arguments are not supported for non url.",
                            NULL);
-    if (ap_proxy_valid_balancer_name(conf->p)) {
+    if (ap_proxy_valid_balancer_name((char*)conf->p)) {
         balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
         if (!balancer) {
             err = ap_proxy_define_balancer(cmd->pool, &balancer,
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 1b5e245d6a..7965b65b82 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -265,14 +265,11 @@ struct proxy_conn_pool {
                  PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR )
 
 /* NOTE: these check the shared status */
-#define PROXY_WORKER_IS_INITIALIZED(f)  ( (f)->s && \
-  ( (f)->s->status &  PROXY_WORKER_INITIALIZED ) )
+#define PROXY_WORKER_IS_INITIALIZED(f)  ( (f)->s->status &  PROXY_WORKER_INITIALIZED )
 
-#define PROXY_WORKER_IS_STANDBY(f)  ( (f)->s && \
-  ( (f)->s->status &  PROXY_WORKER_HOT_STANDBY ) )
+#define PROXY_WORKER_IS_STANDBY(f)  ( (f)->s->status &  PROXY_WORKER_HOT_STANDBY )
 
-#define PROXY_WORKER_IS_USABLE(f)   ( (f)->s && \
-  ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
+#define PROXY_WORKER_IS_USABLE(f)   ( ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
   PROXY_WORKER_IS_INITIALIZED(f) )
 
 /* default worker retry timeout in seconds */
@@ -344,8 +341,8 @@ struct proxy_worker {
     proxy_conn_pool     *cp;    /* Connection pool to use */
     proxy_worker_shared   *s;   /* Shared data */
     proxy_balancer  *balancer;  /* which balancer am I in? */
-    void            *context;  /* general purpose storage */
     apr_thread_mutex_t  *mutex; /* Thread lock for updating address cache */
+    void            *context;  /* general purpose storage */
 };
 
 /*
@@ -545,8 +542,9 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
  * @param worker worker to be shared
  * @param shm    location of shared info
  * @param i      index into shm
+ * @return       APR_SUCCESS or error code
  */
-PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
+PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
 
 /**
  * Initialize the worker by setting up worker connection pool and mutex
diff --git a/modules/proxy/mod_proxy_balancer.c b/modules/proxy/mod_proxy_balancer.c
index e78212e0ba..982ccf2092 100644
--- a/modules/proxy/mod_proxy_balancer.c
+++ b/modules/proxy/mod_proxy_balancer.c
@@ -38,7 +38,15 @@ static char balancer_nonce[APR_UUID_FORMATTED_LENGTH + 1];
 static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                                apr_pool_t *ptemp)
 {
-    ap_mutex_register(pconf, balancer_mutex_type, NULL, APR_LOCK_DEFAULT, 0);
+
+    apr_status_t rv;
+
+    rv = ap_mutex_register(pconf, balancer_mutex_type, NULL,
+                           APR_LOCK_DEFAULT, 0);
+    if (rv != APR_SUCCESS) {
+        return rv;
+    }
+
     return OK;
 }
 
@@ -102,17 +110,18 @@ static void init_balancer_members(proxy_server_conf *conf, server_rec *s,
                                   proxy_balancer *balancer)
 {
     int i;
-    proxy_worker *worker;
+    proxy_worker **workers;
 
-    worker = (proxy_worker *)balancer->workers->elts;
+    workers = (proxy_worker **)balancer->workers->elts;
     for (i = 0; i < balancer->workers->nelts; i++) {
         int worker_is_initialized;
+        proxy_worker *worker = *workers;
         worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker);
         if (!worker_is_initialized) {
             ap_proxy_initialize_worker(worker, s, conf->pool);
         }
-        ++worker;
+        ++workers;
     }
 
     /* Set default number of attempts to the number of
@@ -197,13 +206,12 @@ static proxy_worker *find_route_worker(proxy_balancer *balancer,
     int checked_standby;
 
     proxy_worker **workers;
-    proxy_worker *worker;
 
     checking_standby = checked_standby = 0;
     while (!checked_standby) {
         workers = (proxy_worker **)balancer->workers->elts;
         for (i = 0; i < balancer->workers->nelts; i++, workers++) {
-            worker = *workers;
+            proxy_worker *worker = *workers;
             if ( (checking_standby ? !PROXY_WORKER_IS_STANDBY(worker) : PROXY_WORKER_IS_STANDBY(worker)) )
                 continue;
             if (*(worker->s->route) && strcmp(worker->s->route, route) == 0) {
@@ -666,6 +674,18 @@ static void recalc_factors(proxy_balancer *balancer)
     }
 }
+
+static apr_status_t lock_remove(void *data)
+{
+    server_rec *s = data;
+    void *sconf = s->module_config;
+    proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+    if (conf->mutex) {
+        apr_global_mutex_destroy(conf->mutex);
+        conf->mutex = NULL;
+    }
+    return(0);
+}
 
 /* post_config hook: */
 static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                                 apr_pool_t *ptemp, server_rec *s)
@@ -677,12 +697,13 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
     proxy_server_conf *conf = (proxy_server_conf *)
         ap_get_module_config(sconf, &proxy_module);
     const char *userdata_key = "mod_proxy_balancer_init";
-    /* balancer_init() will be called twice during startup. So, only
+    /* balancer_post_config() will be called twice during startup. So, only
      * set up the static data the 1st time through.
      */
     apr_pool_userdata_get(&data, userdata_key, s->process->pool);
     if (!data) {
         apr_pool_userdata_set((const void *)1, userdata_key, apr_pool_cleanup_null, s->process->pool);
+        return OK;
     }
     /* Retrieve a UUID and store the nonce for the lifetime of
      * the process. */
@@ -692,10 +713,16 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
     /* Create global mutex */
     rv = ap_global_mutex_create(&conf->mutex, NULL, balancer_mutex_type, NULL,
                                 s, pconf, 0);
-    if (rv != APR_SUCCESS) {
+    if (rv != APR_SUCCESS || !conf->mutex) {
+        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
+                     "mutex creation of %s failed", balancer_mutex_type);
         return HTTP_INTERNAL_SERVER_ERROR;
     }
 
+    apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
+                              apr_pool_cleanup_null);
+
+
     /*
      * Get worker slotmem setup
      */
@@ -718,8 +745,7 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
     /* Initialize shared scoreboard data */
     proxy_balancer *balancer = (proxy_balancer *)conf->balancers->elts;
     for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
-        apr_size_t size;
-        unsigned int num;
+        proxy_worker **workers;
         proxy_worker *worker;
         ap_slotmem_instance_t *new = NULL;
 
@@ -736,18 +762,13 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
             return !OK;
         }
         balancer->slot = new;
-#if 0
-        rv = storage->attach(&(balancer->slot), balancer->name, &size, &num, pconf);
-        if (rv != APR_SUCCESS) {
-            ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_attach failed");
-            return !OK;
-        }
-#endif
-        worker = (proxy_worker *)balancer->workers->elts;
-        for (j = 0; j < balancer->workers->nelts; j++, worker++) {
+
+        workers = (proxy_worker **)balancer->workers->elts;
+        for (j = 0; j < balancer->workers->nelts; j++, workers++) {
             proxy_worker_shared *shm;
             unsigned int index;
+            worker = *workers;
             if ((rv = storage->grab(balancer->slot, &index)) != APR_SUCCESS) {
                 ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_grab failed");
                 return !OK;
             }
@@ -757,8 +778,11 @@ static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                 ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_dptr failed");
                 return !OK;
             }
-            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %pp %pp %d", worker->s, shm, (int)index);
-            ap_proxy_share_worker(worker, shm, index);
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %x %pp %pp %pp %pp %d", worker->hash, worker->balancer, (char *)worker->context, worker->s, shm, (int)index);
+            if ((rv = ap_proxy_share_worker(worker, shm, index)) != APR_SUCCESS) {
+                ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "Cannot share worker");
+                return !OK;
+            }
         }
     }
     s = s->next;
@@ -890,7 +914,7 @@ static int balancer_handler(request_rec *r)
                       "\n", NULL);
             ap_rvputs(r, " ", worker->s->hostname, "\n", NULL);
-            ap_rprintf(r, " %d\n",
+            ap_rprintf(r, " %d\n",
                        worker->s->lbfactor);
             ap_rputs(" \n", r);
             ++workers;
@@ -1036,6 +1060,11 @@ static void balancer_child_init(apr_pool_t *p, server_rec *s)
     proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
     apr_status_t rv;
+    if (!conf->mutex) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+                     "no mutex %s", balancer_mutex_type);
+        return;
+    }
     /* Re-open the mutex for the child.
      */
     rv = apr_global_mutex_child_init(&conf->mutex,
                                      apr_global_mutex_lockfile(conf->mutex),
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 4ad4e9968c..42b2c613b0 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -1577,7 +1577,7 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
     int worker_name_length;
     const char *c;
     char *url_copy;
-    int i, end;
+    int i;
 
     c = ap_strchr_c(url, ':');
     if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
@@ -1606,31 +1606,39 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
         ap_str_tolower(url_copy);
         min_match = strlen(url_copy);
     }
-
-    if (balancer) {
-        worker = (proxy_worker *)balancer->workers->elts;
-        end = balancer->workers->nelts;
-    } else {
-        worker = (proxy_worker *)conf->workers->elts;
-        end = conf->workers->nelts;
-    }
-
     /*
      * Do a "longest match" on the worker name to find the worker that
      * fits best to the URL, but keep in mind that we must have at least
      * a minimum matching of length min_match such that
      * scheme://hostname[:port] matches between worker and url.
      */
-    for (i = 0; i < end; i++) {
-        if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
-            && (worker_name_length >= min_match)
-            && (worker_name_length > max_match)
-            && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
-            max_worker = worker;
-            max_match = worker_name_length;
+
+    if (balancer) {
+        proxy_worker **workers = (proxy_worker **)balancer->workers->elts;
+        for (i = 0; i < balancer->workers->nelts; i++, workers++) {
+            worker = *workers;
+            if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+                && (worker_name_length >= min_match)
+                && (worker_name_length > max_match)
+                && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+                max_worker = worker;
+                max_match = worker_name_length;
+            }
+
+        }
+    } else {
+        worker = (proxy_worker *)conf->workers->elts;
+        for (i = 0; i < conf->workers->nelts; i++, worker++) {
+            if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+                && (worker_name_length >= min_match)
+                && (worker_name_length > max_match)
+                && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+                max_worker = worker;
+                max_match = worker_name_length;
+            }
         }
-        worker++;
     }
+
 
     return max_worker;
 }
@@ -1671,20 +1679,27 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
      *
      * in which case the worker goes in the conf slot.
      */
-    if (balancer)
-        *worker = apr_array_push(balancer->workers);
-    else if (conf)
+    if (balancer) {
+        proxy_worker **runtime;
+        /* recall that we get a ptr to the ptr here */
+        runtime = apr_array_push(balancer->workers);
+        *worker = *runtime = apr_palloc(p, sizeof(proxy_worker)); /* right to left baby */
+    } else if (conf) {
         *worker = apr_array_push(conf->workers);
-    else {
-        proxy_worker *w = apr_palloc(p, sizeof(proxy_worker));
-        *worker = w;
+    } else {
+        /* we need to allocate space here */
+        *worker = apr_palloc(p, sizeof(proxy_worker));
     }
     memset(*worker, 0, sizeof(proxy_worker));
 
     /* right here we just want to tuck away the worker info.
     * if called during config, we don't have shm setup yet,
    * so just note the info for later.
    */
+#if 0
    wstatus = malloc(sizeof(proxy_worker_shared)); /* will be freed ap_proxy_share_worker */
+#else
+    wstatus = apr_palloc(p, sizeof(proxy_worker_shared));
+#endif
 
    memset(wstatus, 0, sizeof(proxy_worker_shared));
@@ -1698,24 +1713,30 @@ PROXY_DECLARE(char *) ap_proxy_define_worker(apr_pool_t *p,
     wstatus->hash = ap_proxy_hashfunc(wstatus->name, PROXY_HASHFUNC_DEFAULT);
 
     (*worker)->hash = wstatus->hash;
+    (*worker)->context = NULL;
     (*worker)->cp = NULL;
     (*worker)->mutex = NULL;
     (*worker)->balancer = balancer;
-
     (*worker)->s = wstatus;
-
+
     return NULL;
 }
 
 /*
  * Create an already defined worker and free up memory
  */
-PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
+PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
 {
+    if (!shm || !worker->s)
+        return APR_EINVAL;
+
     memcpy(shm, worker->s, sizeof(proxy_worker_shared));
+#if 0
     free(worker->s); /* was malloced in ap_proxy_define_worker */
+#endif
     worker->s = shm;
     worker->s->index = i;
+    return APR_SUCCESS;
 }
 
 PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)
-- 
2.40.0
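Reviewer note, not part of the patch: the subject line's point is that conf->workers stores proxy_worker elements directly, so its elts pointer is walked as proxy_worker * and advanced with worker++, while balancer->workers stores proxy_worker * elements, so its elts pointer is walked as proxy_worker ** and each entry needs an extra dereference (worker = *workers) before use. That is the pattern the hunks in proxy_util.c and mod_proxy_balancer.c switch to. The sketch below is a minimal, self-contained illustration in plain C; the toy_worker type, the array names and the fixed sizes are invented stand-ins, and none of it is httpd or APR code.

/* Minimal stand-in (not httpd code): an array whose elements are structs is
 * walked with an "elem *" cursor, while an array whose elements are pointers
 * is walked with an "elem **" cursor and dereferenced, mirroring the
 * conf->workers vs balancer->workers layouts this patch adjusts for. */
#include <stdio.h>

typedef struct {
    const char *name;   /* hypothetical stand-in for proxy_worker fields */
    int lbfactor;
} toy_worker;

int main(void)
{
    /* Like conf->workers: the array stores the structs themselves. */
    toy_worker direct[2] = { { "http://backend1", 1 }, { "http://backend2", 2 } };

    /* Like balancer->workers: the array stores pointers to structs. */
    toy_worker *byref[2] = { &direct[0], &direct[1] };

    toy_worker *worker = direct;              /* cursor of type elem *  */
    for (int i = 0; i < 2; i++, worker++) {
        printf("direct: %s lbfactor=%d\n", worker->name, worker->lbfactor);
    }

    toy_worker **workers = byref;             /* cursor of type elem ** */
    for (int i = 0; i < 2; i++, workers++) {
        toy_worker *w = *workers;             /* extra dereference, as in the patch */
        printf("byref:  %s lbfactor=%d\n", w->name, w->lbfactor);
    }
    return 0;
}

Compiled with an ordinary C compiler, both loops visit the same two elements; only the element type of the backing array decides whether the cursor is elem * or elem ** and whether a dereference is needed before member access.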