From 4a2e3a23966c5177e2e09ca94c24c817206198ce Mon Sep 17 00:00:00 2001
From: Jim Jagielski
Date: Fri, 2 Dec 2016 11:46:38 +0000
Subject: [PATCH] Merge r1738631, r1738633, r1738635, r1757030, r1757031,
 r1770752, r1770768 from trunk:

Don't take over scoreboard slots from gracefully finishing threads

Otherwise the old and the new thread will both update the same scoreboard
slot with undefined results.

add comments

Document which directives set which variables

Make ap_find_child_by_pid() look at all slots that have ever been used.
This is preparation for using more scoreboard slots in mpm event.

mpm_event: minor code simplification

- move variable initializations into declarations
- use max_workers variable

mpm_event: don't re-use scoreboard slots that are still in use

Re-using such slots causes inconsistent data in the scoreboard (due to
async connections) and makes it difficult to determine what is going on.
Therefore it is not a useful fix for the scoreboard-full issues (PR 53555).
The consensus on the dev list is that we should allocate/use more
scoreboard entries instead.

Use all available scoreboard slots

Allow all slots up to ServerLimit to be used. This makes 'scoreboard full'
errors much less likely. And if there is a situation where the scoreboard
is full, don't make any more processes finish gracefully due to reduced
load until some old processes have terminated. Otherwise, the situation
would get worse once the load increases again.

ap_daemons_limit is renamed to the more descriptive active_daemons_limit,
to make sure that all its uses are taken care of.

PR 53555

mpm_event: add clarifying comment from jim

Submitted by: sf
Reviewed/backported by: jim

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1772335 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES                  |   8 +
 STATUS                   |  15 --
 server/mpm/event/event.c | 305 ++++++++++++++++++---------------
 server/scoreboard.c      |   2 +-
 4 files changed, 149 insertions(+), 181 deletions(-)

diff --git a/CHANGES b/CHANGES
index 5d2462669e..dd69e7c0af 100644
--- a/CHANGES
+++ b/CHANGES
@@ -15,6 +15,14 @@ Changes with Apache 2.4.24
   *) mod_ssl: Fix quick renegotiation (OptRenegotiaton) with no intermediate
      in the client certificate chain. PR 55786. [Yann Ylavic]
 
+  *) event: Allow use of the whole allocated scoreboard (up to ServerLimit
+     slots) to avoid scoreboard full errors when some processes are finishing
+     gracefully. Also, make gracefully finishing processes close all
+     keep-alive connections. PR 53555. [Stefan Fritsch]
+
+  *) mpm_event: Don't take over scoreboard slots from gracefully finishing
+     threads. [Stefan Fritsch]
+
   *) mpm_event: Free memory earlier when shutting down processes.
      [Stefan Fritsch]
 
diff --git a/STATUS b/STATUS
index 71988742f0..517baab617 100644
--- a/STATUS
+++ b/STATUS
@@ -117,21 +117,6 @@ RELEASE SHOWSTOPPERS:
 PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
   [ start all new proposals below, under PATCHES PROPOSED. ]
 
-  *) mpm_event: Use all free scoreboard slots up to ServerLimit, but don't
-     re-use scoreboard slots of still running, gracefully finishing processes.
-     PR: 53555
-     trunk patches:
-       https://svn.apache.org/r1738631
-       https://svn.apache.org/r1738633
-       https://svn.apache.org/r1738635
-       https://svn.apache.org/r1757030
-       https://svn.apache.org/r1757031
-       https://svn.apache.org/r1770752
-       https://svn.apache.org/r1770768
-     2.4.x patch: https://people.apache.org/~sf/PR53555_4_use_all_slots.diff
-     docs: https://svn.apache.org/r1770771 (in addition to the above)
-     +1: sf, jim, wrowe
-
 
 PATCHES PROPOSED TO BACKPORT FROM TRUNK:
 
diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index bd74f24e7c..c133c1a0a3 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -160,15 +160,18 @@
 #endif
 #define WORKER_FACTOR_SCALE   16  /* scale factor to allow fractional values */
 static unsigned int worker_factor = DEFAULT_WORKER_FACTOR * WORKER_FACTOR_SCALE;
-
-static int threads_per_child = 0;   /* Worker threads per child */
-static int ap_daemons_to_start = 0;
-static int min_spare_threads = 0;
-static int max_spare_threads = 0;
-static int ap_daemons_limit = 0;
-static int max_workers = 0;
-static int server_limit = 0;
-static int thread_limit = 0;
+    /* AsyncRequestWorkerFactor * 16 */
+
+static int threads_per_child = 0;       /* ThreadsPerChild */
+static int ap_daemons_to_start = 0;     /* StartServers */
+static int min_spare_threads = 0;       /* MinSpareThreads */
+static int max_spare_threads = 0;       /* MaxSpareThreads */
+static int active_daemons_limit = 0;    /* MaxRequestWorkers / ThreadsPerChild */
+static int active_daemons = 0;          /* daemons that are still active, i.e.
+                                           not shutting down gracefully */
+static int max_workers = 0;             /* MaxRequestWorkers */
+static int server_limit = 0;            /* ServerLimit */
+static int thread_limit = 0;            /* ThreadLimit */
 static int had_healthy_child = 0;
 static int dying = 0;
 static int workers_may_exit = 0;
@@ -336,6 +339,14 @@ typedef struct event_retained_data {
      * scoreboard.
      */
     int max_daemons_limit;
+
+    /*
+     * All running child processes, both active and shutting down,
+     * including any that may be left over from before a graceful restart.
+     * Not kept up-to-date when shutdown is pending.
+     */
+    int total_daemons;
+
     /*
      * idle_spawn_rate is the number of children that will be spawned on the
      * next maintenance cycle if there aren't enough idle servers.  It is
@@ -549,7 +560,7 @@ static int event_query(int query_code, int *result, apr_status_t *rv)
         *result = ap_max_requests_per_child;
         break;
     case AP_MPMQ_MAX_DAEMONS:
-        *result = ap_daemons_limit;
+        *result = active_daemons_limit;
         break;
     case AP_MPMQ_MPM_STATE:
         *result = mpm_state;
@@ -586,27 +597,6 @@ static void event_note_child_started(int slot, pid_t pid)
                         retained->my_generation, slot, MPM_CHILD_STARTED);
 }
 
-static void event_note_child_lost_slot(int slot, pid_t newpid)
-{
-    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00458)
-                 "pid %" APR_PID_T_FMT " taking over scoreboard slot from "
-                 "%" APR_PID_T_FMT "%s",
-                 newpid,
-                 ap_scoreboard_image->parent[slot].pid,
-                 ap_scoreboard_image->parent[slot].quiescing ?
-                 " (quiescing)" : "");
-    ap_run_child_status(ap_server_conf,
-                        ap_scoreboard_image->parent[slot].pid,
-                        ap_scoreboard_image->parent[slot].generation,
-                        slot, MPM_CHILD_LOST_SLOT);
-    /* Don't forget about this exiting child process, or we
-     * won't be able to kill it if it doesn't exit by the
-     * time the server is shut down.
-     */
-    ap_register_extra_mpm_process(ap_scoreboard_image->parent[slot].pid,
-                                  ap_scoreboard_image->parent[slot].generation);
-}
-
 static const char *event_get_name(void)
 {
     return "event";
@@ -2150,7 +2140,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
             int status =
                 ap_scoreboard_image->servers[my_child_num][i].status;
 
-            if (status != SERVER_GRACEFUL && status != SERVER_DEAD) {
+            if (status != SERVER_DEAD) {
                 continue;
             }
 
@@ -2457,6 +2447,15 @@ static int make_child(server_rec * s, int slot, int bucket)
         retained->max_daemons_limit = slot + 1;
     }
 
+    if (ap_scoreboard_image->parent[slot].pid != 0) {
+        /* XXX replace with assert or remove ? */
+        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(03455)
+                     "BUG: Scoreboard slot %d should be empty but is "
+                     "in use by pid %" APR_PID_T_FMT,
+                     slot, ap_scoreboard_image->parent[slot].pid);
+        return -1;
+    }
+
     if (one_process) {
         my_bucket = &all_buckets[0];
 
@@ -2510,17 +2509,12 @@ static int make_child(server_rec * s, int slot, int bucket)
         return -1;
     }
 
-    if (ap_scoreboard_image->parent[slot].pid != 0) {
-        /* This new child process is squatting on the scoreboard
-         * entry owned by an exiting child process, which cannot
-         * exit until all active requests complete.
-         */
-        event_note_child_lost_slot(slot, pid);
-    }
     ap_scoreboard_image->parent[slot].quiescing = 0;
     ap_scoreboard_image->parent[slot].not_accepting = 0;
     ap_scoreboard_image->parent[slot].bucket = bucket;
     event_note_child_started(slot, pid);
+    active_daemons++;
+    retained->total_daemons++;
 
     return 0;
 }
@@ -2529,7 +2523,7 @@ static void startup_children(int number_to_start)
 {
     int i;
 
-    for (i = 0; number_to_start && i < ap_daemons_limit; ++i) {
+    for (i = 0; number_to_start && i < server_limit; ++i) {
         if (ap_scoreboard_image->parent[i].pid != 0) {
             continue;
         }
@@ -2543,34 +2537,22 @@ static void startup_children(int number_to_start)
 static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
 {
     int i, j;
-    int idle_thread_count;
+    int idle_thread_count = 0;
     worker_score *ws;
     process_score *ps;
-    int free_length;
-    int totally_free_length = 0;
+    int free_length = 0;
     int free_slots[MAX_SPAWN_RATE];
-    int last_non_dead;
-    int total_non_dead;
+    int last_non_dead = -1;
     int active_thread_count = 0;
 
-    /* initialize the free_list */
-    free_length = 0;
-
-    idle_thread_count = 0;
-    last_non_dead = -1;
-    total_non_dead = 0;
-
-    for (i = 0; i < ap_daemons_limit; ++i) {
+    for (i = 0; i < server_limit; ++i) {
         /* Initialization to satisfy the compiler. It doesn't know
          * that threads_per_child is always > 0 */
         int status = SERVER_DEAD;
-        int any_dying_threads = 0;
-        int any_dead_threads = 0;
-        int all_dead_threads = 1;
         int child_threads_active = 0;
 
         if (i >= retained->max_daemons_limit &&
-            totally_free_length == retained->idle_spawn_rate[child_bucket]) {
+            free_length == retained->idle_spawn_rate[child_bucket]) {
            /* short cut if all active processes have been examined and
             * enough empty scoreboard slots have been found
            */
@@ -2578,25 +2560,17 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
             break;
         }
         ps = &ap_scoreboard_image->parent[i];
-        for (j = 0; j < threads_per_child; j++) {
-            ws = &ap_scoreboard_image->servers[i][j];
-            status = ws->status;
-
-            /* XXX any_dying_threads is probably no longer needed GLA */
-            any_dying_threads = any_dying_threads ||
-                (status == SERVER_GRACEFUL);
-            any_dead_threads = any_dead_threads || (status == SERVER_DEAD);
-            all_dead_threads = all_dead_threads &&
-                (status == SERVER_DEAD || status == SERVER_GRACEFUL);
-
-            /* We consider a starting server as idle because we started it
-             * at least a cycle ago, and if it still hasn't finished starting
-             * then we're just going to swamp things worse by forking more.
-             * So we hopefully won't need to fork more if we count it.
-             * This depends on the ordering of SERVER_READY and SERVER_STARTING.
-             */
-            if (ps->pid != 0) { /* XXX just set all_dead_threads in outer
-                                   for loop if no pid? not much else matters */
+        if (ps->pid != 0) {
+            for (j = 0; j < threads_per_child; j++) {
+                ws = &ap_scoreboard_image->servers[i][j];
+                status = ws->status;
+
+                /* We consider a starting server as idle because we started it
+                 * at least a cycle ago, and if it still hasn't finished starting
+                 * then we're just going to swamp things worse by forking more.
+                 * So we hopefully won't need to fork more if we count it.
+                 * This depends on the ordering of SERVER_READY and SERVER_STARTING.
+                 */
                 if (status <= SERVER_READY && !ps->quiescing &&
                     !ps->not_accepting &&
                     ps->generation == retained->my_generation && ps->bucket == child_bucket)
@@ -2607,39 +2581,13 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
                     ++child_threads_active;
                 }
             }
+            last_non_dead = i;
         }
         active_thread_count += child_threads_active;
-        if (any_dead_threads
-            && totally_free_length < retained->idle_spawn_rate[child_bucket]
-            && free_length < MAX_SPAWN_RATE / num_buckets
-            && (!ps->pid      /* no process in the slot */
-                || ps->quiescing)) {  /* or at least one is going away */
-            if (all_dead_threads) {
-                /* great! we prefer these, because the new process can
-                 * start more threads sooner.  So prioritize this slot
-                 * by putting it ahead of any slots with active threads.
-                 *
-                 * first, make room by moving a slot that's potentially still
-                 * in use to the end of the array
-                 */
-                free_slots[free_length] = free_slots[totally_free_length];
-                free_slots[totally_free_length++] = i;
-            }
-            else {
-                /* slot is still in use - back of the bus
-                 */
-                free_slots[free_length] = i;
-            }
-            ++free_length;
-        }
-        else if (child_threads_active == threads_per_child) {
+        if (!ps->pid && free_length < retained->idle_spawn_rate[child_bucket])
+            free_slots[free_length++] = i;
+        else if (child_threads_active == threads_per_child)
             had_healthy_child = 1;
-        }
-        /* XXX if (!ps->quiescing) is probably more reliable GLA */
-        if (!any_dying_threads) {
-            last_non_dead = i;
-            ++total_non_dead;
-        }
     }
 
     if (retained->sick_child_detected) {
@@ -2667,32 +2615,56 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
 
     retained->max_daemons_limit = last_non_dead + 1;
 
-    if (idle_thread_count > max_spare_threads / num_buckets) {
-        /* Kill off one child */
-        ap_mpm_podx_signal(all_buckets[child_bucket].pod,
-                           AP_MPM_PODX_GRACEFUL);
-        retained->idle_spawn_rate[child_bucket] = 1;
+    if (idle_thread_count > max_spare_threads / num_buckets)
+    {
+        /*
+         * Child processes that we ask to shut down won't die immediately
+         * but may stay around for a long time when they finish their
+         * requests.  If the server load changes many times, many such
+         * gracefully finishing processes may accumulate, filling up the
+         * scoreboard.  To avoid running out of scoreboard entries, we
+         * don't shut down more processes when the total number of processes
+         * is high.
+         *
+         * XXX It would be nice if we could
+         * XXX - kill processes without keepalive connections first
+         * XXX - tell children to stop accepting new connections, and
+         * XXX   depending on server load, later be able to resurrect them
+         *       or kill them
+         */
+        if (retained->total_daemons <= active_daemons_limit &&
+            retained->total_daemons < server_limit) {
+            /* Kill off one child */
+            ap_mpm_podx_signal(all_buckets[child_bucket].pod,
+                               AP_MPM_PODX_GRACEFUL);
+            retained->idle_spawn_rate[child_bucket] = 1;
+            active_daemons--;
+        } else {
+            ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf,
+                         "Not shutting down child: total daemons %d / "
+                         "active limit %d / ServerLimit %d",
+                         retained->total_daemons, active_daemons_limit,
+                         server_limit);
+        }
     }
     else if (idle_thread_count < min_spare_threads / num_buckets) {
-        /* terminate the free list */
-        if (free_length == 0) { /* scoreboard is full, can't fork */
-
-            if (active_thread_count >= ap_daemons_limit * threads_per_child) {
-                if (!retained->maxclients_reported) {
-                    /* only report this condition once */
-                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00484)
-                                 "server reached MaxRequestWorkers setting, "
-                                 "consider raising the MaxRequestWorkers "
-                                 "setting");
-                    retained->maxclients_reported = 1;
-                }
-            }
-            else {
-                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00485)
-                             "scoreboard is full, not at MaxRequestWorkers");
+        if (active_thread_count >= max_workers) {
+            if (!retained->maxclients_reported) {
+                /* only report this condition once */
+                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00484)
+                             "server reached MaxRequestWorkers setting, "
+                             "consider raising the MaxRequestWorkers "
+                             "setting");
+                retained->maxclients_reported = 1;
             }
             retained->idle_spawn_rate[child_bucket] = 1;
         }
+        else if (free_length == 0) { /* scoreboard is full, can't fork */
+            ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO()
+                         "scoreboard is full, not at MaxRequestWorkers. "
+                         "Increase ServerLimit.");
+            retained->idle_spawn_rate[child_bucket] = 1;
+        }
         else {
             if (free_length > retained->idle_spawn_rate[child_bucket]) {
                 free_length = retained->idle_spawn_rate[child_bucket];
             }
@@ -2703,10 +2675,17 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
                              "to increase StartServers, ThreadsPerChild "
                              "or Min/MaxSpareThreads), "
-                             "spawning %d children, there are around %d idle "
-                             "threads, and %d total children", free_length,
-                             idle_thread_count, total_non_dead);
+                             "spawning %d children, there are around %d idle "
+                             "threads, %d active children, and %d children "
+                             "that are shutting down", free_length,
+                             idle_thread_count, active_daemons,
+                             retained->total_daemons);
             }
             for (i = 0; i < free_length; ++i) {
+                ap_log_error(APLOG_MARK, APLOG_TRACE5, 0, ap_server_conf,
+                             "Spawning new child: slot %d active / "
+                             "total daemons: %d/%d",
+                             free_slots[i], active_daemons,
+                             retained->total_daemons);
                 make_child(ap_server_conf, free_slots[i], child_bucket);
             }
             /* the next time around we want to spawn twice as many if this
@@ -2728,7 +2707,6 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
 
 static void server_main_loop(int remaining_children_to_start, int num_buckets)
 {
-    ap_generation_t old_gen;
     int child_slot;
     apr_exit_why_e exitwhy;
     int status, processed_status;
@@ -2752,6 +2730,10 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
                     == retained->my_generation) {
                     shutdown_pending = 1;
                     child_fatal = 1;
+                    /*
+                     * total_daemons counting will be off now, but as we
+                     * are shutting down, that is not an issue anymore.
+                     */
                     return;
                 }
                 else {
@@ -2778,13 +2760,16 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
                 event_note_child_killed(child_slot, 0, 0);
                 ps = &ap_scoreboard_image->parent[child_slot];
+                if (!ps->quiescing)
+                    active_daemons--;
                 ps->quiescing = 0;
+                /* NOTE: We don't decrement in the (child_slot < 0) case! */
+                retained->total_daemons--;
                 if (processed_status == APEXIT_CHILDSICK) {
                     /* resource shortage, minimize the fork rate */
                     retained->idle_spawn_rate[ps->bucket] = 1;
                 }
-                else if (remaining_children_to_start
-                    && child_slot < ap_daemons_limit) {
+                else if (remaining_children_to_start) {
                     /* we're still doing a 1-for-1 replacement of dead
                      * children with new children
                      */
                     make_child(ap_server_conf, child_slot, ps->bucket);
                     --remaining_children_to_start;
                 }
             }
-            else if (ap_unregister_extra_mpm_process(pid.pid, &old_gen) == 1) {
-
-                event_note_child_killed(-1, /* already out of the scoreboard */
-                                        pid.pid, old_gen);
-                if (processed_status == APEXIT_CHILDSICK
-                    && old_gen == retained->my_generation) {
-                    /* resource shortage, minimize the fork rate */
-                    for (i = 0; i < num_buckets; i++) {
-                        retained->idle_spawn_rate[i] = 1;
-                    }
-                }
 #if APR_HAS_OTHER_CHILD
-            }
             else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH,
                                                 status) == 0) {
                 /* handled */
-#endif
             }
+#endif
             else if (retained->is_graceful) {
                 /* Great, we've probably just lost a slot in the
                  * scoreboard.  Somehow we don't know about this child.
@@ -2871,8 +2844,8 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
     /* Don't thrash since num_buckets depends on the
      * system and the number of online CPU cores...
      */
-    if (ap_daemons_limit < num_buckets)
-        ap_daemons_limit = num_buckets;
+    if (active_daemons_limit < num_buckets)
+        active_daemons_limit = num_buckets;
     if (ap_daemons_to_start < num_buckets)
         ap_daemons_to_start = num_buckets;
     /* We want to create as much children at a time as the number of buckets,
@@ -2896,8 +2869,8 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
      * supposed to start up without the 1 second penalty between each fork.
      */
     remaining_children_to_start = ap_daemons_to_start;
-    if (remaining_children_to_start > ap_daemons_limit) {
-        remaining_children_to_start = ap_daemons_limit;
+    if (remaining_children_to_start > active_daemons_limit) {
+        remaining_children_to_start = active_daemons_limit;
     }
     if (!retained->is_graceful) {
         startup_children(remaining_children_to_start);
@@ -2927,7 +2900,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * Kill child processes, tell them to call child_exit, etc...
          */
         for (i = 0; i < num_buckets; i++) {
-            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
                                AP_MPM_PODX_RESTART);
         }
         ap_reclaim_child_processes(1, /* Start with SIGTERM */
@@ -2951,7 +2924,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
         /* Close our listeners, and then ask our children to do same */
         ap_close_listeners();
         for (i = 0; i < num_buckets; i++) {
-            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
                                AP_MPM_PODX_GRACEFUL);
         }
         ap_relieve_child_processes(event_note_child_killed);
@@ -2979,7 +2952,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
             ap_relieve_child_processes(event_note_child_killed);
 
             active_children = 0;
-            for (index = 0; index < ap_daemons_limit; ++index) {
+            for (index = 0; index < retained->max_daemons_limit; ++index) {
                 if (ap_mpm_safe_kill(MPM_CHILD_PID(index), 0) == APR_SUCCESS) {
                     active_children = 1;
                     /* Having just one child is enough to stay around */
@@ -2994,7 +2967,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * really dead.
          */
         for (i = 0; i < num_buckets; i++) {
-            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
                                AP_MPM_PODX_RESTART);
         }
         ap_reclaim_child_processes(1, event_note_child_killed);
@@ -3023,7 +2996,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
                    " received.  Doing graceful restart");
        /* wake up the children...time to die.  But we'll have more soon */
        for (i = 0; i < num_buckets; i++) {
-            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
                                AP_MPM_PODX_GRACEFUL);
        }
 
@@ -3038,7 +3011,7 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
          * pthreads are stealing signals from us left and right.
          */
         for (i = 0; i < num_buckets; i++) {
-            ap_mpm_podx_killpg(all_buckets[i].pod, ap_daemons_limit,
+            ap_mpm_podx_killpg(all_buckets[i].pod, active_daemons_limit,
                                AP_MPM_PODX_RESTART);
         }
 
@@ -3048,6 +3021,8 @@ static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
                      "SIGHUP received.  Attempting to restart");
Attempting to restart"); } + active_daemons = 0; + return OK; } @@ -3261,9 +3236,9 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog, max_spare_threads = DEFAULT_MAX_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD; server_limit = DEFAULT_SERVER_LIMIT; thread_limit = DEFAULT_THREAD_LIMIT; - ap_daemons_limit = server_limit; + active_daemons_limit = server_limit; threads_per_child = DEFAULT_THREADS_PER_CHILD; - max_workers = ap_daemons_limit * threads_per_child; + max_workers = active_daemons_limit * threads_per_child; had_healthy_child = 0; ap_extended_status = 0; @@ -3472,10 +3447,10 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog, max_workers = threads_per_child; } - ap_daemons_limit = max_workers / threads_per_child; + active_daemons_limit = max_workers / threads_per_child; if (max_workers % threads_per_child) { - int tmp_max_workers = ap_daemons_limit * threads_per_child; + int tmp_max_workers = active_daemons_limit * threads_per_child; if (startup) { ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00513) @@ -3483,7 +3458,7 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog, "multiple of ThreadsPerChild of %d, decreasing to nearest " "multiple %d, for a maximum of %d servers.", max_workers, threads_per_child, tmp_max_workers, - ap_daemons_limit); + active_daemons_limit); } else { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00514) "MaxRequestWorkers of %d is not an integer multiple " @@ -3494,25 +3469,25 @@ static int event_check_config(apr_pool_t *p, apr_pool_t *plog, max_workers = tmp_max_workers; } - if (ap_daemons_limit > server_limit) { + if (active_daemons_limit > server_limit) { if (startup) { ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00515) "WARNING: MaxRequestWorkers of %d would require %d servers " "and would exceed ServerLimit of %d, decreasing to %d. " "To increase, please see the ServerLimit directive.", - max_workers, ap_daemons_limit, server_limit, + max_workers, active_daemons_limit, server_limit, server_limit * threads_per_child); } else { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00516) "MaxRequestWorkers of %d would require %d servers and " "exceed ServerLimit of %d, decreasing to %d", - max_workers, ap_daemons_limit, server_limit, + max_workers, active_daemons_limit, server_limit, server_limit * threads_per_child); } - ap_daemons_limit = server_limit; + active_daemons_limit = server_limit; } - /* ap_daemons_to_start > ap_daemons_limit checked in ap_mpm_run() */ + /* ap_daemons_to_start > active_daemons_limit checked in ap_mpm_run() */ if (ap_daemons_to_start < 1) { if (startup) { ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00517) diff --git a/server/scoreboard.c b/server/scoreboard.c index 8e3403f735..d83a8492c3 100644 --- a/server/scoreboard.c +++ b/server/scoreboard.c @@ -399,7 +399,7 @@ AP_DECLARE(int) ap_find_child_by_pid(apr_proc_t *pid) int i; int max_daemons_limit = 0; - ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_daemons_limit); + ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_daemons_limit); for (i = 0; i < max_daemons_limit; ++i) { if (ap_scoreboard_image->parent[i].pid == pid->pid) { -- 2.40.0