From: Thom Brown
Date: Mon, 10 Feb 2014 14:43:50 +0000 (+0000)
Subject: Fix various typos.
X-Git-Tag: v5.1~49^2
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c72095142f9cd293a23691d7232d5b36e918bd5f;p=pgbadger

Fix various typos.
---

diff --git a/pgbadger b/pgbadger
index 7d30289..0f5fc81 100755
--- a/pgbadger
+++ b/pgbadger
@@ -184,7 +184,7 @@ sub stop_parsing
     $interrupt = 1;
 }
 
-# With multiprocess we need to wait all childs
+# With multiprocess we need to wait for all children
 sub wait_child
 {
     my $sig = shift;
@@ -275,10 +275,10 @@ if ($ver) {
 }
 &usage() if ($help);
 
-# Rewrite some command line argument as lists
+# Rewrite some command line arguments as lists
 &compute_arg_list();
 
-# Log file to be parsed are passed as command line argument
+# Log files to be parsed are passed as command line arguments
 if ($#ARGV >= 0) {
     foreach my $file (@ARGV) {
         if ($file ne '-') {
@@ -380,7 +380,7 @@ if ($outfile ne '-') {
         die "FATAL: output file $outfile is a directory, should be a file\nor maybe you want to use -O | --outdir option instead.\n";
     }
 } elsif (!-d "$outdir") {
-    # An output directory have been passed as command line parameter
+    # An output directory has been passed as command line parameter
     die "FATAL: $outdir is not a directory or doesn't exist.\n";
 }
 $outfile = basename($outfile);
@@ -758,13 +758,13 @@ $tmp_last_parsed = 'tmp_' . basename($last_parsed) if ($last_parsed);
 my $global_totalsize = 0;
 my @given_log_files = ( @log_files );
 
-# Verify that the file have not changed for incremental move
+# Verify that the file has not changed for incremental move
 if ( ($saved_last_line{current_pos} > 0) && ($#given_log_files == 0)) {
     $saved_last_line{current_pos} = 0 if (&check_file_changed($given_log_files[0], $saved_last_line{datetime}));
     $saved_last_line{current_pos}++ if ($saved_last_line{current_pos} > 0);
 }
 
-# log files must be erase when loading stats from binary format
+# log files must be erased when loading stats from binary format
 if ($format eq 'binary') {
     $queue_size = 1;
     $job_per_file = 1;
@@ -817,7 +817,7 @@ if ( ($queue_size > 1) || ($job_per_file > 1) ) {
         }
         # Do not use split method with compressed files
         if ( ($queue_size > 1) && ($logfile !~ /\.(gz|bz2|zip)/i) ) {
-            # Create multiple process to parse one log file by chunks of data
+            # Create multiple processes to parse one log file by chunks of data
             my @chunks = &split_logfile($logfile);
             for (my $i = 0; $i < $#chunks; $i++) {
                 while ($child_count >= $parallel_process) {
@@ -850,7 +850,7 @@ if ( ($queue_size > 1) || ($job_per_file > 1) ) {
     my $minproc = 1;
     $minproc = 0 if (!$progress);
 
-    # Wait for all child dies less the logger
+    # Wait for all child processes to die except for the logger
     while (scalar keys %RUNNING_PIDS > $minproc) {
        my $kid = waitpid(-1, WNOHANG);
        if ($kid > 0) {
@@ -867,7 +867,7 @@ if ( ($queue_size > 1) || ($job_per_file > 1) ) {
     # Clear previous statistics
     &init_stats_vars();
 
-    # Load all data gathered by all the differents processes
+    # Load all data gathered by all the different processes
     foreach my $f (@tempfiles) {
         next if (!-e "$f->[1]" || -z "$f->[1]");
         my $fht = new IO::File;
@@ -991,7 +991,7 @@ if (!$incremental) {
         # First clear previous stored statistics
         &init_stats_vars();
 
-        # Load all data gathered by all the differents processes
+        # Load all data gathered by all the different processes
         unless(opendir(DIR, "$outdir/$bpath")) {
             die "Error: can't opendir $outdir/$bpath: $!";
         }
@@ -1117,7 +1117,7 @@ if (!$incremental) {
     };
 
 
-    # get years directories
+    # get year directories
     unless(opendir(DIR, "$outdir")) {
         die "Error: can't opendir $outdir: $!";
     }
@@ -1223,7 +1223,7 @@ Options:
 -d | --dbname database : only report on entries for the given database.
 -e | --end datetime    : end date/time for the data to be parsed in log.
 -f | --format logtype  : possible values: syslog,stderr,csv. Default: stderr.
--G | --nograph         : disable graphs on HTML output. Enable by default.
+-G | --nograph         : disable graphs on HTML output. Enabled by default.
 -h | --help            : show this message and exit.
 -i | --ident name      : programname used as syslog ident. Default: postgres
 -I | --incremental     : use incremental mode, reports will be generated by
@@ -1242,7 +1242,7 @@ Options:
                          on the output format: out.html, out.txt or out.tsung.
                          To dump output to stdout use - as filename.
 -O | --outdir path     : directory where out file must be saved.
--p | --prefix string   : give here the value of your custom log_line_prefix
+-p | --prefix string   : the value of your custom log_line_prefix
                          as defined in your postgresql.conf. Only use it if
                          you aren't using one of the standard prefixes specified
                          in the pgBadger documentation, such as if your prefix
@@ -1251,7 +1251,7 @@ Options:
 -P | --no-prettify     : disable SQL queries prettify formatter.
 -q | --quiet           : don't print anything to stdout, not even a progress bar.
 -s | --sample number   : number of query samples to store/display. Default: 3
--S | --select-only     : use it if you want to report select queries only.
+-S | --select-only     : only report SELECT queries.
 -t | --top number      : number of queries to store/display. Default: 20
 -T | --title string    : change title of the HTML page report.
 -u | --dbuser username : only report on entries for the given user.
@@ -1261,10 +1261,10 @@ Options:
 -w | --watch-mode      : only report errors just like logwatch could do.
 -x | --extension       : output format. Values: text, html or tsung. Default: html
 -z | --zcat exec_path  : set the full path to the zcat program. Use it if
-                         zcat or bzcat or unzip is not on your path.
+                         zcat or bzcat or unzip is not in your path.
 --pie-limit num        : pie data lower than num% will show a sum instead.
 --exclude-query regex  : any query matching the given regex will be excluded
-                         from the report. For example: "^(VACUUM|COMMIT)"
+                         from the report. For example: "^(VACUUM|COMMIT)"
                          You can use this option multiple times.
 --exclude-file filename: path of the file which contains all the regex to use
                          to exclude queries from the report. One regex per line.
@@ -1302,14 +1302,14 @@ Examples:
     pgbadger -b "2012-06-25 10:56:11" -e "2012-06-25 10:59:11" \
         /var/log/postgresql.log
     cat /var/log/postgres.log | pgbadger -
 
-    # log prefix with stderr log output
+    # Log prefix with stderr log output
     perl pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d,client=%h' \
         /pglog/postgresql-2012-08-21*
     perl pgbadger --prefix '%m %u@%d %p %r %a : ' /pglog/postgresql.log
     # Log line prefix with syslog log output
     perl pgbadger --prefix 'user=%u,db=%d,client=%h,appname=%a' \
         /pglog/postgresql-2012-08-21*
 
-    # Use my 8 CPUs to parse my 10GB file faster, really faster
+    # Use my 8 CPUs to parse my 10GB file faster, much faster
     perl pgbadger -j 8 /pglog/postgresql-9.1-main.log
@@ -1340,8 +1340,9 @@
 use pgbadger as follow to exclude these period from the report:
 
     pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log
 
-This will help to not have all COPY order on top of slowest queries. You can
-also use --exclude-appname "pg_dump" to solve this problem in a more simple way.
+This will help avoid having COPY statements, as generated by pg_dump, on top of
+the list of slowest queries. You can also use --exclude-appname "pg_dump" to
+solve this problem in a simpler way.
 
 };
@@ -1401,7 +1402,7 @@ sub multiprocess_progressbar
 
     $0 = 'pgbadger logger';
 
-    # Terminate the process when we doesn't read the complete file but must exit
+    # Terminate the process when we haven't read the complete file but must exit
     local $SIG{USR1} = sub {
         print STDERR "\n";
         exit 0;
@@ -1618,11 +1619,11 @@ sub process_file
             # skip non postgresql lines
             next if ($prefix_vars{'t_ident'} ne $ident);
 
-            # Stores temporary files and locks information
+            # Store temporary files and locks information
             &store_temporary_and_lock_infos($cur_pid);
 
             # Standard syslog format does not have year information, months are
-            # three letters and day are not always with 2 digit.
+            # three letters and days are not always with 2 digits.
             if ($prefix_vars{'t_month'} !~ /\d/) {
                 $prefix_vars{'t_year'} = $gyear;
                 $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'});
@@ -1717,7 +1718,7 @@ sub process_file
                     $cur_info{$cur_pid}{query} .= "\n" . $t_query;
                 }
 
-            # Collect orphans lines of multiline queries
+            # Collect orphaned lines of multiline queries
             } elsif ($cur_pid && ($line !~ $orphan_syslog_line)) {
 
                 if (exists $cur_temp_info{$cur_pid}{query}) {
@@ -1746,7 +1747,7 @@ sub process_file
                 $prefix_vars{$prefix_params[$i]} = $matches[$i];
             }
 
-            # Stores temporary files and locks information
+            # Stores temporary files and lock information
             &store_temporary_and_lock_infos($cur_pid);
 
             if (!$prefix_vars{'t_timestamp'} && $prefix_vars{'t_mtimestamp'}) {
@@ -1837,7 +1838,7 @@ sub process_file
                     $cur_info{$cur_pid}{query} .= "\n" . $line;
                 }
 
-            # Collect orphans lines of multiline queries
+            # Collect orphaned lines of multiline queries
             } elsif ($cur_pid && ($cur_info{$cur_pid}{query})) {
 
                 $cur_info{$cur_pid}{detail} .= "\n" . $line;
@@ -1858,15 +1859,15 @@ sub process_file
     # Get stats from all pending temporary storage
     foreach my $pid (sort {$cur_info{$a}{date} <=> $cur_info{$b}{date}} keys %cur_info) {
-        # Stores last queries information
+        # Stores last query information
         &store_queries($pid);
     }
 
-    # Stores last temporary files and locks information
+    # Stores last temporary files and lock information
     foreach my $pid (keys %cur_temp_info) {
         &store_temporary_and_lock_infos($pid);
     }
 
-    # Stores last temporary files and locks information
+    # Stores last temporary files and lock information
     foreach my $pid (keys %cur_lock_info) {
         &store_temporary_and_lock_infos($pid);
     }
@@ -2005,7 +2006,7 @@ sub check_file_changed
                 $prefix_vars{$prefix_params[$i]} = $matches[$i];
             }
             # Standard syslog format does not have year information, months are
-            # three letters and day are not always with 2 digit.
+            # three letters and days are not always with 2 digits.
             if ($prefix_vars{'t_month'} !~ /\d/) {
                 $prefix_vars{'t_year'} = $gyear;
                 $prefix_vars{'t_day'} = sprintf("%02d", $prefix_vars{'t_day'});
@@ -2052,7 +2053,7 @@
 
 }
 
-# Method used to check if we have already reach the last parsing position in incremental mode
+# Method used to check if we have already reached the last parsing position in incremental mode
 # This position should have been saved in the incremental file and read in the $last_parsed at
 # start up.
 sub check_incremental_position
@@ -2083,11 +2084,11 @@ sub check_incremental_position
         # Stores last queries information
         &store_queries($pid);
     }
-    # Stores last temporary files and locks information
+    # Stores last temporary files and lock information
     foreach my $pid (keys %cur_temp_info) {
         &store_temporary_and_lock_infos($pid);
     }
-    # Stores last temporary files and locks information
+    # Stores last temporary files and lock information
     foreach my $pid (keys %cur_lock_info) {
         &store_temporary_and_lock_infos($pid);
     }
@@ -3767,9 +3768,9 @@ sub compute_query_graphs
             if (exists $q_dataavg{count}) {
                 # Average queries per minute
                 $graph_data{query} .= "[$t, " . int(($q_dataavg{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],";
-                # Maxi queries per minute
+                # Max queries per minute
                 $graph_data{'query-max'} .= "[$t, " . ($q_dataavg{max}{"$rd"} || 0) . "],";
-                # Mini queries per minute
+                # Min queries per minute
                 $graph_data{'query-min'} .= "[$t, " . ($q_dataavg{min}{"$rd"} || 0) . "],";
                 # Average duration per minute
                 $graph_data{query4} .= "[$t, " . sprintf("%.3f", ($q_dataavg{duration}{"$rd"} || 0) / ($q_dataavg{count}{"$rd"} || 1)) . "],";
@@ -3777,19 +3778,19 @@ sub compute_query_graphs
             if (scalar keys %c_dataavg) {
                 # Average connections per minute
                 $graph_data{conn_avg} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],";
-                # Maxi connections per minute
+                # Max connections per minute
                 $graph_data{conn_max} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],";
-                # Mini connections per minute
+                # Min connections per minute
                 $graph_data{conn_min} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],";
             }
 
             if (scalar keys %s_dataavg) {
                 # Average connections per minute
                 $graph_data{sess_avg} .= "[$t, " . int(($s_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],";
-                # Maxi connections per minute
+                # Max connections per minute
                 $graph_data{sess_max} .= "[$t, " . ($s_dataavg{max}{"$rd"} || 0) . "],";
-                # Mini connections per minute
+                # Min connections per minute
                 $graph_data{sess_min} .= "[$t, " . ($s_dataavg{min}{"$rd"} || 0) . "],";
             }
 
             if (!$disable_query && (scalar keys %a_dataavg > 0)) {
@@ -3799,9 +3800,9 @@ sub compute_query_graphs
                     # Average queries per minute
                     $graph_data{"$action"} .= "[$t, " . int(($a_dataavg{$action}{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],";
                     if ($action eq 'SELECT') {
-                        # Maxi queries per minute
+                        # Max queries per minute
                         $graph_data{"$action-max"} .= "[$t, " . ($a_dataavg{$action}{max}{"$rd"} || 0) . "],";
-                        # Mini queries per minute
+                        # Min queries per minute
                         $graph_data{"$action-min"} .= "[$t, " . ($a_dataavg{$action}{min}{"$rd"} || 0) . "],";
                         # Average query duration
                         $graph_data{"$action-2"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{$action}{duration}{"$rd"} || 0) / ($a_dataavg{$action}{count}{"$rd"} || 1)) . "]," if ($action eq 'SELECT');
@@ -4452,7 +4453,7 @@ sub print_checkpoint
         next if ($t < $t_min);
         last if ($t > $t_max);
 
-        # Average of written checkpoints buffers and wal files
+        # Average of written checkpoint buffers and wal files
         if (exists $chk_dataavg{wbuffer}) {
             $graph_data{wbuffer} .= "[$t, " . ($chk_dataavg{wbuffer}{"$rd"} || 0) . "],";
             $graph_data{file_added} .= "[$t, " . ($chk_dataavg{file_added}{"$rd"} || 0) . "],";
@@ -4466,7 +4467,7 @@ sub print_checkpoint
             $graph_data{$_} =~ s/,$//;
         }
     }
-    # Checkpoints buffers and files
+    # Checkpoint buffers and files
     $drawn_graphs{checkpointwritebuffers_graph} =
         &flotr2_graph($graphid++, 'checkpointwritebuffers_graph', $graph_data{wbuffer}, '', '', 'Checkpoint write buffers (' . $avg_minutes .
         ' minutes period)',
@@ -7848,13 +7849,13 @@ sub validate_log_line
 {
     my ($t_pid) = @_;
 
-    # Look at particular cas of vacuum/analyze that have the database
+    # Look at particular cases of vacuum/analyze that have the database
     # name inside the log message so that they could be associated
     if ($prefix_vars{'t_query'} =~ / of table "([^\.]+)\.[^\.]+\.[^\.]+":/) {
         $prefix_vars{'t_dbname'} = $1;
     }
 
-    # Check user and/or database if require
+    # Check user and/or database if required
     if ($#dbname >= 0) {
 
         # Log line does not match the required dbname
@@ -7965,7 +7966,7 @@ sub parse_query
 
     # Stores the error's detail if previous line was an error
     if ($cur_info{$t_pid}{loglevel} =~ $main_error_regex) {
-        # and current one is a detailed information
+        # and current one is detailed information
         if ($prefix_vars{'t_loglevel'} =~ /(DETAIL|STATEMENT|CONTEXT|HINT)/) {
             $cur_info{$t_pid}{"\L$1\E"} .= $prefix_vars{'t_query'};
             return;
@@ -7995,7 +7996,7 @@ sub parse_query
         $lock_info{$1}{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++;
         $lock_info{$1}{chronos}{$date_part}{$prefix_vars{'t_hour'}}{duration}++;
         # Store current lock information that will be used later
-        # when we will parse the query responsible of the locks
+        # when we will parse the query responsible for the locks
         $cur_lock_info{$t_pid}{wait} = $3;
         if ($format eq 'csv') {
             $cur_lock_info{$t_pid}{query} = $prefix_vars{'t_statement'};
@@ -8028,7 +8029,7 @@ sub parse_query
         $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{tempfile}{size} += $1;
         $tempfile_info{maxsize} = $1 if ($tempfile_info{maxsize} < $1);
         # Store current temporary file information that will be used later
-        # when we will parse the query responsible of the tempfile
+        # when we will parse the query responsible for the tempfile
         $cur_temp_info{$t_pid}{size} = $1;
         $overall_stat{'peak'}{$cur_last_log_timestamp}{tempfile_size} += $1;
         $overall_stat{'peak'}{$cur_last_log_timestamp}{tempfile_count}++;
@@ -8135,7 +8136,7 @@ sub parse_query
             return;
         }
 
-        # Store time in millisecond
+        # Store time in milliseconds
         $time =~ /(\d+):(\d+):(\d+\.\d+)/;
         $time = ($3 * 1000) + ($2 * 60 * 1000) + ($1 * 60 * 60 * 1000);
         $session_info{count}++;
@@ -8313,7 +8314,7 @@ sub parse_query
     }
 
     ####
-    # Registrer previous query storage into global statistics before starting to store current query
+    # Register previous query storage into global statistics before starting to store current query
     ####
     if (exists $cur_info{$t_pid}{query}) {
         # when switching to a new log message
@@ -8388,7 +8389,7 @@ sub parse_query
 
         $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{$t_action}++;
         # Skipping parse and bind logs
         return if ($t_action !~ /query|statement|execute/);
-    # Log line that would not be parse
+    # Log line that could not be parsed
     } elsif ($prefix_vars{'t_loglevel'} eq 'LOG') {
         if ($prefix_vars{'t_query'} !~ /incomplete startup packet|connection|receive|unexpected EOF|still waiting for [^\s]+Lock|checkpoint starting:|could not send data to client|parameter .*configuration file|autovacuum launcher|automatic (analyze|vacuum)|detected deadlock while waiting for/
@@ -8496,7 +8497,7 @@ sub store_queries
         $cur_info{$t_pid}{query} =~ s/\/\*(.*?)\*\///gs;
     }
 
-    # Stores temporary files and locks information
+    # Stores temporary files and lock information
     &store_temporary_and_lock_infos($t_pid);
 
     return if (!exists $cur_info{$t_pid});
@@ -8688,7 +8689,7 @@ sub store_queries
 
     $normalyzed_info{$normalized}{chronos}{"$cur_day_str"}{"$cur_hour_str"}{count}++;
     if ($cur_info{$t_pid}{duration}) {
-        # Updtate top slowest queries statistics
+        # Update top slowest queries statistics
         &set_top_slowest($cur_info{$t_pid}{query}, $cur_info{$t_pid}{duration}, $cur_last_log_timestamp, $cur_info{$t_pid}{dbname},
             $cur_info{$t_pid}{dbuser}, $cur_info{$t_pid}{dbclient},$cur_info{$t_pid}{dbappname});
         # Store normalized query total duration
@@ -9935,12 +9936,12 @@ sub get_week_number
 # %U The week number of the current year as a decimal number, range 00 to 53, starting with the first
 #    Sunday as the first day of week 01.
-# %V The ISO 8601 week number (see NOTES) of the current year as a decimal number, range 01 to 53,
+# %V The ISO 8601 week number (see NOTES) of the current year as a decimal number, range 01 to 53,
 #    where week 1 is the first week that has at least 4 days in the new year.
 # %W The week number of the current year as a decimal number, range 00 to 53, starting with the first
 #    Monday as the first day of week 01.
 
-    # Check if the date is valide first
+    # Check if the date is valid first
     my $datefmt = POSIX::strftime("%F", 1, 1, 1, $day, $month - 1, $year - 1900);
     if ($datefmt ne "$year-$month-$day") {
         return -1;
     }
@@ -9990,7 +9991,7 @@ sub get_wdays_per_month
     my $y = $1;
     my $m = $2;
     foreach my $day ("01" .. "31") {
-        # Check if the date is valide first
+        # Check if the date is valid first
         my $datefmt = POSIX::strftime("%F", 1, 1, 1, $day, $m - 1, $y - 1900);
         if ($datefmt ne "$y-$m-$day") {
             next;
         }
@@ -10015,7 +10016,7 @@ sub get_wdays_per_year
     foreach my $m ("01" .. "12") {
         foreach my $day ("01" .. "31") {
-            # Check if the date is valide first
+            # Check if the date is valid first
             my $datefmt = POSIX::strftime("%F", 1, 1, 1, $day, $m - 1, $y - 1900);
             if ($datefmt ne "$y-$m-$day") {
                 next;