Changes with Apache 2.4.5
+ *) mod_proxy: Fix seg-faults when using the global pool on threaded
+ MPMs [Thomas Eckert <thomas.r.w.eckert gmail.com>, Graham Leggett,
+ Jim Jagielski]
+
*) mod_deflate: Remove assumptions as to when an EOS bucket might arrive.
Gracefully step aside if the body size is zero. [Graham Leggett]
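
The mod_proxy entry above comes down to one point: APR pools are not thread-safe, so on a threaded MPM every allocation from the long-lived, shared conf->pool must be serialized, which is what the new proxy global mutex in the hunks below does. A minimal sketch of the pattern (not part of the patch; the helper name is illustrative):

    #include "httpd.h"
    #include "apr_global_mutex.h"
    #include "mod_proxy.h"

    extern apr_global_mutex_t *proxy_mutex;  /* created in proxy_post_config() */

    /* Illustrative helper: allocate a worker from the shared configuration
     * pool.  Two threads calling apr_palloc()/apr_pcalloc() on the same pool
     * concurrently can corrupt the allocator and segfault, hence the lock. */
    static proxy_worker *alloc_shared_worker(proxy_server_conf *conf)
    {
        proxy_worker *w;
        apr_global_mutex_lock(proxy_mutex);
        w = apr_pcalloc(conf->pool, sizeof(*w));
        apr_global_mutex_unlock(proxy_mutex);
        return w;
    }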
PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
[ start all new proposals below, under PATCHES PROPOSED. ]
- * mod_proxy: Fix pool usage by protecting w/ a mutex
- trunk patch: http://svn.apache.org/viewvc?view=revision&revision=1480627
- http://svn.apache.org/viewvc?view=revision&revision=1482859
- http://svn.apache.org/viewvc?view=revision&revision=1483190
- http://svn.apache.org/viewvc?view=revision&revision=1484343
- http://svn.apache.org/viewvc?view=revision&revision=1500437
- 2.4.x patch: trunk works, modulo CHANGES
- +1: jim, minfrin, sf
PATCHES PROPOSED TO BACKPORT FROM TRUNK:
[ New proposals should be added at the end of the list ]
#define MAX(x,y) ((x) >= (y) ? (x) : (y))
#endif
+static const char * const proxy_id = "proxy";
+apr_global_mutex_t *proxy_mutex = NULL;
+
/*
* A Web proxy module. Stages:
*
ps->badopt_set = 0;
ps->source_address = NULL;
ps->source_address_set = 0;
- ps->pool = p;
+ apr_pool_create_ex(&ps->pool, p, NULL, NULL);
return ps;
}
ps->proxy_status_set = overrides->proxy_status_set || base->proxy_status_set;
ps->source_address = (overrides->source_address_set == 0) ? base->source_address : overrides->source_address;
ps->source_address_set = overrides->source_address_set || base->source_address_set;
- ps->pool = p;
+ ps->pool = base->pool;
return ps;
}
static const char *set_source_address(cmd_parms *parms, void *dummy,
static int proxy_post_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
+ apr_status_t rv = ap_global_mutex_create(&proxy_mutex, NULL,
+ proxy_id, NULL, s, pconf, 0);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02478)
+ "failed to create %s mutex", proxy_id);
+ return rv;
+ }
proxy_ssl_enable = APR_RETRIEVE_OPTIONAL_FN(ssl_proxy_enable);
proxy_ssl_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
{
proxy_worker *reverse = NULL;
+ apr_status_t rv = apr_global_mutex_child_init(&proxy_mutex,
+ apr_global_mutex_lockfile(proxy_mutex),
+ p);
+ if (rv != APR_SUCCESS) {
+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(02479)
+ "could not init proxy_mutex in child");
+ exit(1); /* Ugly, but what else? */
+ }
+
/* TODO */
while (s) {
void *sconf = s->module_config;
/*
* This routine is called before the server processes the configuration
- * files. There is no return value.
+ * files.
*/
static int proxy_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
+ apr_status_t rv = ap_mutex_register(pconf, proxy_id, NULL,
+ APR_LOCK_DEFAULT, 0);
+ if (rv != APR_SUCCESS) {
+ ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, plog, APLOGNO(02480)
+ "failed to register %s mutex", proxy_id);
+ return 500; /* An HTTP status would be a misnomer! */
+ }
+
APR_OPTIONAL_HOOK(ap, status_hook, proxy_status_hook, NULL, NULL,
APR_HOOK_MIDDLE);
    /* Reset workers count on graceful restart */
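
Taken together, the three hook hunks above follow the standard httpd global-mutex lifecycle: register the mutex type in pre_config (so the Mutex directive can tune it), create it once in post_config, and re-attach in each child. A condensed sketch, with error handling omitted and the hook names chosen only for illustration:

    #include "httpd.h"
    #include "http_config.h"
    #include "util_mutex.h"
    #include "apr_global_mutex.h"

    static apr_global_mutex_t *proxy_mutex = NULL;

    static int x_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                            apr_pool_t *ptemp)
    {
        /* 1. Register the "proxy" mutex type before the config is read. */
        ap_mutex_register(pconf, "proxy", NULL, APR_LOCK_DEFAULT, 0);
        return OK;
    }

    static int x_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                             apr_pool_t *ptemp, server_rec *s)
    {
        /* 2. Create the mutex once in the parent process. */
        ap_global_mutex_create(&proxy_mutex, NULL, "proxy", NULL, s, pconf, 0);
        return OK;
    }

    static void x_child_init(apr_pool_t *p, server_rec *s)
    {
        /* 3. Attach each child process to the mutex created by the parent. */
        apr_global_mutex_child_init(&proxy_mutex,
                                    apr_global_mutex_lockfile(proxy_mutex), p);
    }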
status_full
} proxy_status; /* Status display options */
apr_sockaddr_t *source_address;
- apr_global_mutex_t *mutex; /* global lock (needed??) */
+ apr_global_mutex_t *mutex; /* global lock, for pool, etc */
ap_slotmem_instance_t *bslot; /* balancers shm data - runtime */
ap_slotmem_provider_t *storage;
(val = apr_table_get(params, "b_nwrkr"))) {
char *ret;
proxy_worker *nworker;
- nworker = ap_proxy_get_worker(conf->pool, bsel, conf, val);
+ nworker = ap_proxy_get_worker(r->pool, bsel, conf, val);
if (!nworker && storage->num_free_slots(bsel->wslot)) {
if ((rv = PROXY_GLOBAL_LOCK(bsel)) != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01194)
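
The balancer-manager hunk above switches the worker lookup from the shared conf->pool to r->pool: a lookup is per-request scratch work, so allocating from the request pool keeps it private to the request's thread and freed automatically, with no global lock needed. Roughly (a sketch, not the surrounding handler code; the helper name is illustrative):

    #include "httpd.h"
    #include "mod_proxy.h"

    /* r->pool belongs to this request's thread and is destroyed with the
     * request, so the lookup itself needs no proxy_mutex.  Only adding a new
     * worker to the shared configuration takes the locked path. */
    static proxy_worker *find_member(request_rec *r, proxy_balancer *bsel,
                                     proxy_server_conf *conf, const char *name)
    {
        return ap_proxy_get_worker(r->pool, bsel, conf, name);
    }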
const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_path;
const apr_strmatch_pattern PROXY_DECLARE_DATA *ap_proxy_strmatch_domain;
+extern apr_global_mutex_t *proxy_mutex;
+
static int proxy_match_ipaddr(struct dirconn_entry *This, request_rec *r);
static int proxy_match_domainname(struct dirconn_entry *This, request_rec *r);
static int proxy_match_hostname(struct dirconn_entry *This, request_rec *r);
else {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00927)
"initializing worker %s local", worker->s->name);
+ apr_global_mutex_lock(proxy_mutex);
/* Now init local worker data */
if (worker->tmutex == NULL) {
rv = apr_thread_mutex_create(&(worker->tmutex), APR_THREAD_MUTEX_DEFAULT, p);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00928)
"can not create worker thread mutex");
+ apr_global_mutex_unlock(proxy_mutex);
return rv;
}
}
if (worker->cp == NULL) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00929)
"can not create connection pool");
+ apr_global_mutex_unlock(proxy_mutex);
return APR_EGENERAL;
}
"initialized single connection worker in child %" APR_PID_T_FMT " for (%s)",
getpid(), worker->s->hostname);
}
+ apr_global_mutex_unlock(proxy_mutex);
+
}
if (rv == APR_SUCCESS) {
worker->s->status |= (PROXY_WORKER_INITIALIZED);
}
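
One wrinkle in the hunk above: every early return inside the locked region must release proxy_mutex first, which is easy to miss when new error paths are added later. A common alternative, shown only as a sketch and not what the patch does, funnels all exits through a single unlock point:

    #include "httpd.h"
    #include "mod_proxy.h"
    #include "apr_thread_mutex.h"

    extern apr_global_mutex_t *proxy_mutex;

    static apr_status_t init_worker_locked(proxy_worker *worker, apr_pool_t *p)
    {
        apr_status_t rv = APR_SUCCESS;

        apr_global_mutex_lock(proxy_mutex);
        if (worker->tmutex == NULL) {
            rv = apr_thread_mutex_create(&worker->tmutex,
                                         APR_THREAD_MUTEX_DEFAULT, p);
            if (rv != APR_SUCCESS) {
                goto out;            /* single unlock point below */
            }
        }
        /* ... remaining shared-pool setup elided ... */
    out:
        apr_global_mutex_unlock(proxy_mutex);
        return rv;
    }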
if (!found) {
proxy_worker **runtime;
+ apr_global_mutex_lock(proxy_mutex);
runtime = apr_array_push(b->workers);
*runtime = apr_palloc(conf->pool, sizeof(proxy_worker));
+ apr_global_mutex_unlock(proxy_mutex);
(*runtime)->hash = shm->hash;
(*runtime)->context = NULL;
(*runtime)->cp = NULL;
(*runtime)->balancer = b;
(*runtime)->s = shm;
(*runtime)->tmutex = NULL;
- if ((rv = ap_proxy_initialize_worker(*runtime, s, conf->pool)) != APR_SUCCESS) {
+ rv = ap_proxy_initialize_worker(*runtime, s, conf->pool);
+ if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, APLOGNO(00966) "Cannot init worker");
return rv;
}