From 251bd5fa18e15ccb28e2cbd70ca0c6e0f3aff0c8 Mon Sep 17 00:00:00 2001
From: Darold Gilles
Date: Thu, 31 Oct 2013 08:57:17 +0100
Subject: [PATCH] Major update to pgBadger v4.0

---
 ChangeLog        |   83 +
 README           |   81 +-
 doc/pgBadger.pod |   65 +-
 pgbadger         | 7141 +++++++++++++++++++++++++++++++++-------------
 4 files changed, 5349 insertions(+), 2021 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index a74c111..319b4f0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,86 @@
+2013-10-31 - Version 4.0
+
+This major release is the "Say goodbye to the fouine" release. With a full
+rewrite of the report design, pgBadger has now turned the HTML reports into
+a more intuitive user experience with a professional look.
+
+The report is now driven by a dynamic menu built with the embedded
+Bootstrap library. Each main menu entry corresponds to a hidden slide that
+is brought to the front when the menu or one of its submenus is activated.
+The FontAwesome webfont is also embedded to beautify the report.
+
+Every statistics report now includes a key values section that immediately
+shows you the most relevant information. Pie charts have also been
+separated from their data tables using two tabs, one for the chart and the
+other for the data.
+
+Tables reporting hourly statistics have been moved to a multi-tab report
+grouped by data type. This is used for General (queries, connections,
+sessions), Checkpoints (buffers, files, warnings), Temporary files and
+Vacuums activities.
+
+Some new useful information is shown in the key values sections. Peak
+information shows the count and datetime of the highest activity. Here is
+the list of those reports:
+
+  - Queries peak
+  - Read queries peak
+  - Write queries peak
+  - Connections peak
+  - Checkpoints peak
+  - WAL files usage peak
+  - Checkpoints warnings peak
+  - Temporary file size peak
+  - Temporary file number peak
+
+Reports about checkpoints and restartpoints have been merged into a single
+one. They are the same thing, apart from the fact that restartpoints occur
+on a slave cluster, so there was no need to separate that information.
+
+Recent PostgreSQL versions add more information about checkpoints: the
+number of synced files, the longest sync and the average sync time per
+file. pgBadger collects and shows this information in the Checkpoint
+Activity report.
+
+There are also some new reports:
+
+  - Prepared queries ratio (execute vs prepare)
+  - Prepared over normal queries
+  - Queries (select, insert, update, delete) per user/host/application
+  - Pie charts of the tables with the most tuples and pages removed during
+    vacuum.
+
+The vacuum report now highlights the most costly tables during a vacuum or
+analyze of a database.
+
+Errors are now highlighted with a different color per level: LOG is green,
+HINT yellow, WARNING orange, ERROR red and FATAL dark red.
+
+Some changes in the binary format are not backward compatible, and the
+--client option has been removed as it was replaced by --dbclient a long
+time ago.
+
+If you are running a pg_dump or some batch process with very slow queries,
+your analysis will be cluttered by those queries taking too much space in
+the report. Before this release it was a pain to exclude them. Now you can
+use the --exclude-time command line option to exclude all traces matching
+the given time regexp from the report.
+For example, let's say you have a pg_dump at 13:00 each day that lasts half
+an hour; you can use pgbadger as follows:
+
+    pgbadger --exclude-time "2013-09-.* 13:.*" postgresql.log
+
+If you are also running a pg_dump at night, say at 22:00, you can write it
+as follows:
+
+    pgbadger --exclude-time '2013-09-\d+ 13:[0-3]' --exclude-time '2013-09-\d+ 22:[0-3]' postgresql.log
+
+or more concisely:
+
+    pgbadger --exclude-time '2013-09-\d+ (13|22):[0-3]' postgresql.log
+
+--exclude-time always requires the ISO notation yyyy-mm-dd hh:mm:ss, even if
+the log format is syslog. The same applies to all time-related options. Be
+aware that this option has a high cost on parser performance.
+
+
 2013-09-17 - version 3.6

 Still an other version in 3.x branch to fix two major bugs in vacuum and
 checkpoint

diff --git a/README b/README
index b8f0eed..44cfbda 100644
--- a/README
+++ b/README
@@ -84,6 +84,9 @@ SYNOPSIS
     --disable-autovacuum : do not generate autovacuum report.
     --charset            : used to set the HTML charset to be used. Default: utf-8.
     --csv-separator      : used to set the CSV field separator, default: ,
+    --exclude-time regex : any timestamp matching the given regex will be
+                           excluded from the report. Example: "2013-04-12 .*"
+                           You can use this option multiple times.

 Examples:

@@ -121,29 +124,48 @@ SYNOPSIS
     This supposes that your log file and HTML report are also rotated every
     week.

+    If you have a pg_dump at 23:00 and 13:00 each day during half an hour,
+    you can use pgbadger as follows to exclude these periods from the report:
+
+        pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log
+
+    This will help keep all those COPY statements from showing up at the top
+    of the slowest queries report.
+
 DESCRIPTION
-    pgBadger is a PostgreSQL log analyzer built for speed with fully
+    pgBadger is a PostgreSQL log analyzer built for speed with fully
     detailed reports from your PostgreSQL log file. It's a single and small
-    Perl script that aims to replace and out-perform the old PHP script
-    pgFouine.
+    Perl script that outperforms any other PostgreSQL log analyzer.

-    By the way, we would like to thank Guillaume Smet for all the work he
-    has done on this really nice tool. We've been using it a long time, it
-    is a really great tool!
-
-    pgBadger is written in pure Perl language. It uses a Javascript library
-    to draw graphs so that you don't need additional Perl modules or any
-    other package to install. Furthermore, this library gives us additional
-    features, such as zooming.
+    It is written in pure Perl language and uses a JavaScript library
+    (flotr2) to draw graphs so that you don't need to install any additional
+    Perl modules or other packages. Furthermore, this library gives us more
+    features such as zooming. pgBadger also uses the Bootstrap JavaScript
+    library and the FontAwesome webfont for better design. Everything is
+    embedded.

     pgBadger is able to autodetect your log file format (syslog, stderr or
-    csvlog). It is designed to parse huge log files, as well as gzip, zip or
-    bzip2 compressed files. See a complete list of features below.
+    csvlog). It is designed to parse huge log files as well as gzip
+    compressed files. See a complete list of features below.
+
+    All charts are zoomable and can be saved as PNG images.
+
+    You can also limit pgBadger to only report errors or remove any part of
+    the report using command line options.
+
+    pgBadger supports any custom format set into log_line_prefix of your
+    postgresql.conf file provided that you use the %t, %p and %l patterns.
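+
+    For example (a minimal sketch; the prefix and log file name below are
+    only illustrative), a matching postgresql.conf setting and the
+    corresponding pgbadger call using the --prefix option could look like:
+
+        # postgresql.conf: any custom prefix works as long as it
+        # contains the %t, %p and %l escapes
+        log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d '
+
+        # pass the same format string to pgbadger
+        pgbadger --prefix '%t [%p]: [%l-1] user=%u,db=%d ' postgresql.log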
+
+    pgBadger allows parallel processing of a single log file and of multiple
+    files through the use of the -j option with the number of CPUs as value.
+
+    If you want to save system performance you can also use log_duration
+    instead of log_min_duration_statement to get reports on duration and
+    number of queries only.

 FEATURE
     pgBadger reports everything about your SQL queries:

-        Overall statistics.
+        Overall statistics.
         The most frequent waiting queries.
         Queries that waited the most.
         Queries generating the most temporary files.
@@ -156,17 +178,15 @@ FEATURE
     The following reports are also available with hourly charts divide by
     periods of five minutes:

-        Hourly queries statistics.
-        Hourly temporary file statistics.
-        Hourly checkpoints statistics.
-        Hourly restartpoints statistics.
-        Hourly autovacuum statistics.
-        Hourly autoanalyze statistics.
+        SQL queries statistics.
+        Temporary file statistics.
+        Checkpoints statistics.
+        Autovacuum and autoanalyze statistics.

     There's also some pie reports of distribution about:

         Locks statistics.
-        Queries by type (select/insert/update/delete).
+        Queries by type (select/insert/update/delete).
         Distribution of queries type per database/application
         Sessions per database/user/client.
         Connections per database/user/client.
@@ -334,8 +354,8 @@ Parallel processing
 INSTALLATION
     Download the tarball from github and unpack the archive as follow:

-        tar xzf pgbadger-3.x.tar.gz
-        cd pgbadger-3.x/
+        tar xzf pgbadger-4.x.tar.gz
+        cd pgbadger-4.x/
         perl Makefile.PL
         make && sudo make install

@@ -355,8 +375,19 @@ INSTALLATION
     By default INSTALLDIRS is set to site.

 AUTHORS
-    pgBadger is an original work from Gilles Darold. It is maintained by the
-    good folk at Dalibo and everyone who wants to contribute.
+    pgBadger is an original work from Gilles Darold.
+
+    The pgBadger logo is an original creation of Damien Clochard.
+
+    The pgBadger v4.x design comes from the "Art is code" company.
+
+    This web site is a work of Gilles Darold.
+
+    pgBadger is maintained by Gilles Darold, the good folks at Dalibo and
+    everyone who wants to contribute.
+
+    Many people have contributed to pgBadger; they are all listed in the
+    ChangeLog file.

 LICENSE
     pgBadger is free software distributed under the PostgreSQL Licence.

diff --git a/doc/pgBadger.pod b/doc/pgBadger.pod
index 33fe4fe..8bc14bb 100644
--- a/doc/pgBadger.pod
+++ b/doc/pgBadger.pod
@@ -86,6 +86,10 @@ Options:
     --disable-autovacuum : do not generate autovacuum report.
     --charset            : used to set the HTML charset to be used. Default: utf-8.
     --csv-separator      : used to set the CSV field separator, default: ,
+    --exclude-time regex : any timestamp matching the given regex will be
+                           excluded from the report. Example: "2013-04-12 .*"
+                           You can use this option multiple times.
+

 Examples:

@@ -122,21 +126,36 @@ Generate report every week using incremental behavior:

 This supposes that your log file and HTML report are also rotated every week.

+If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can
+use pgbadger as follows to exclude these periods from the report:
+
+   pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log
+
+This will help keep all those COPY statements from showing up at the top of
+the slowest queries report.
+
 =head1 DESCRIPTION

-pgBadger is a PostgreSQL log analyzer built for speed with fully detailed reports from your PostgreSQL log file. It's a single and small Perl script that aims to replace and out-perform the old PHP script pgFouine.
+pgBadger is a PostgreSQL log analyzer built for speed with fully detailed reports from your PostgreSQL log file. It's a single and small Perl script that outperforms any other PostgreSQL log analyzer.
+
+It is written in pure Perl language and uses a JavaScript library (flotr2) to draw graphs so that you don't need to install any additional Perl modules or other packages. Furthermore, this library gives us more features such as zooming. pgBadger also uses the Bootstrap JavaScript library and the FontAwesome webfont for better design. Everything is embedded.
+
+pgBadger is able to autodetect your log file format (syslog, stderr or csvlog). It is designed to parse huge log files as well as gzip compressed files. See a complete list of features below.

-By the way, we would like to thank Guillaume Smet for all the work he has done on this really nice tool. We've been using it a long time, it is a really great tool!
+All charts are zoomable and can be saved as PNG images.

-pgBadger is written in pure Perl language. It uses a Javascript library to draw graphs so that you don't need additional Perl modules or any other package to install. Furthermore, this library gives us additional features, such as zooming.
+You can also limit pgBadger to only report errors or remove any part of the report using command line options.

-pgBadger is able to autodetect your log file format (syslog, stderr or csvlog). It is designed to parse huge log files, as well as gzip, zip or bzip2 compressed files. See a complete list of features below.
+pgBadger supports any custom format set into log_line_prefix of your postgresql.conf file provided that you use the %t, %p and %l patterns.
+
+pgBadger allows parallel processing of a single log file and of multiple files through the use of the -j option with the number of CPUs as value.
+
+If you want to save system performance you can also use log_duration instead of log_min_duration_statement to get reports on duration and number of queries only.

 =head1 FEATURE

 pgBadger reports everything about your SQL queries:

-        Overall statistics.
+        Overall statistics.
         The most frequent waiting queries.
         Queries that waited the most.
         Queries generating the most temporary files.
@@ -149,17 +168,15 @@ pgBadger reports everything about your SQL queries:
     The following reports are also available with hourly charts divide by
 periods of five minutes:

-        Hourly queries statistics.
-        Hourly temporary file statistics.
-        Hourly checkpoints statistics.
-        Hourly restartpoints statistics.
-        Hourly autovacuum statistics.
-        Hourly autoanalyze statistics.
+        SQL queries statistics.
+        Temporary file statistics.
+        Checkpoints statistics.
+        Autovacuum and autoanalyze statistics.

     There's also some pie reports of distribution about:

         Locks statistics.
-        Queries by type (select/insert/update/delete).
+        Queries by type (select/insert/update/delete).
         Distribution of queries type per database/application
         Sessions per database/user/client.
         Connections per database/user/client.
@@ -318,14 +335,18 @@ with the following template tmp_pgbadgerXXXX.bin so they can be easily identifie

 Download the tarball from github and unpack the archive as follow:

-        tar xzf pgbadger-3.x.tar.gz
-        cd pgbadger-3.x/
+        tar xzf pgbadger-4.x.tar.gz
+        cd pgbadger-4.x/
         perl Makefile.PL
         make && sudo make install

-This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation directories for 'site' install.
+This will copy the Perl script pgbadger to /usr/local/bin/pgbadger by default and the
+man page into /usr/local/share/man/man1/pgbadger.1. Those are the default installation
+directories for 'site' install.

-If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage into /usr/share/man/man1/pgbadger.1.
+If you want to install all under /usr/ location, use INSTALLDIRS='perl' as an argument
+of Makefile.PL. The script will be installed into /usr/bin/pgbadger and the manpage
+into /usr/share/man/man1/pgbadger.1.

 For example, to install everything just like Debian does, proceed as follows:

@@ -335,7 +356,17 @@
 By default INSTALLDIRS is set to site.

 =head1 AUTHORS

-pgBadger is an original work from Gilles Darold. It is maintained by the good folk at Dalibo and everyone who wants to contribute.
+pgBadger is an original work from Gilles Darold.
+
+The pgBadger logo is an original creation of Damien Clochard.
+
+The pgBadger v4.x design comes from the "Art is code" company.
+
+This web site is a work of Gilles Darold.
+
+pgBadger is maintained by Gilles Darold, the good folks at Dalibo and everyone who wants to contribute.
+
+Many people have contributed to pgBadger; they are all listed in the ChangeLog file.

 =head1 LICENSE

diff --git a/pgbadger b/pgbadger
index ab8db11..a319bf1 100755
--- a/pgbadger
+++ b/pgbadger
@@ -44,7 +44,7 @@ use IO::Handle;
 use IO::Pipe;
 use Time::HiRes qw/usleep/;

-$VERSION = '3.6';
+$VERSION = '4.0';

 $SIG{'CHLD'} = 'DEFAULT';

@@ -56,6 +56,12 @@ my $interrupt = 0;
 my $tmp_last_parsed = '';
 my @SQL_ACTION = ('SELECT', 'INSERT', 'UPDATE', 'DELETE');
 my $graphid = 1;
+my $NODATA = '
NO DATASET
'; + +my $pgbadger_logo = + ''; +my $pgbadger_ico = + 'data:image/x-icon;base64,AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIiIiAAAAAAA////BxQUFH0FBQWyExMT4wQEBM4ODg7DFRUVe/T09AsAAAAAg4ODAAAAAAAAAAAAAAAAAFJSUgAAAAAAKioqcQICAv9aWlr/MjIy/omJif6Li4v+HR0d/ldXV/8DAwP/Ly8veAAAAABZWVkAAAAAAF9fXwAAAAAAKioqvh4eHv/n5+f/qqqq/s3Nzf4FBQX/AAAA/5KSkv+Kior/6urq/h0dHf8uLi7DAAAAAG1tbQAAAAAAJiYmeCEhIf//////6enp/iMjI//t7e3/l5eX/2dnZ//U1NT/S0tL/9XV1f7/////Hx8f/y8vL34AAAAAvb29HAEBAf/s7Oz+3d3d/j09Pf9TU1P/JCQk//z8/P//////oaGh/xsbG/95eXn/+Pj4/unp6f4DAwP/vLy8GiQkJJJjY2P95+fn/1NTU/9ubm7/UFBQ/yEhIf/m5ub//f39/yEhIf8SEhL/h4eH/4yMjP//////YWFh/w4ODn8CAgLLv7+//09PT/68vLz/ZGRk/zQ0NP8sLCz/9/f3/+Hh4f9gYGD/hYWF/0VFRf9nZ2f//v7+/rm5uf4CAgLLCwsL9efn5/9zc3P/dHR0/wUFBf8MDAz/ampq///////9/f3/FBQU/0BAQP88PDz/WFhY//Ly8v/j4+P+AAAA1QAAANyFhYX+5+fn/09PT/8AAAD/AgIC/9jY2P/+/v7//////y4uLv8AAAD/AAAA/5ycnP+Kior/6Ojo/gQEBOkAAADKVVVV/xgYGP8AAAD/AAAA/yAgIP///////v7+//////87Ozv/AAAA/wAAAP8wMDD/OTk5/7+/v/4AAADMJiYmn5mZmf0AAAD+AAAA/wAAAP9BQUH///////7+/v//////KCgo/wAAAP8AAAD/AAAA/5CQkP9ubm7+MjIynzMzMyFFRUX/zc3N/zw8PP4DAwP/BgYG/9bW1v//////mJiY/wAAAP8AAAD/AAAA/wAAAP5gYGD+BAQE/0JCQiMAAAAAIiIishEREf719fX+6Ojo/3Jycv9XV1f/ZGRk/3d3d/+5ubn/WVlZ/2dnZ/7MzMz+xMTE/h4eHrIAAAAATU1NAAAAAAALCwvhSUlJ/v7+/v7////+//////////////////////////+YmJj/QUFB/g8PD+EAAAAAVVVVAAAAAAAAAAAA////AR8fH70GBgb/goKC/tzc3P7////+/////9zc3P6CgoL+BgYG/xsbG7YAAAAAAAAAAAAAAAAAAAAAAAAAAD8/PwAAAAAARkZGNBkZGa4ODg73AAAA/wAAAP8NDQ31GhoasWFhYUAAAAAAOzs7AAAAAAAAAAAA/D8AAPAPAADAAwAAwAMAAIABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAQAAgAEAAMADAADgBwAA+B8AAA'; #### # method used to fork as many child as wanted @@ -99,7 +105,6 @@ my $ver = ''; my @dbname = (); my @dbuser = (); my @dbclient = (); -my @dbclient2 = (); my @dbappname = (); my @exclude_user = (); my $ident = ''; @@ -118,6 +123,7 @@ my $quiet = 0; my $progress = 1; my $error_only = 0; my @exclude_query = (); +my @exclude_time = (); my $exclude_file = ''; my @include_query = (); my $include_file = ''; @@ -133,7 +139,7 @@ my $disable_checkpoint = 0; my $disable_autovacuum = 0; my $avg_minutes = 5; my $last_parsed = ''; -my $report_title = 'pgBadger: PostgreSQL log analyzer'; +my $report_title = 'PostgreSQL log analyzer'; my $log_line_prefix = ''; my $compiled_prefix = ''; my $project_url = 'http://dalibo.github.com/pgbadger/'; @@ -243,14 +249,12 @@ my $result = GetOptions( "disable-temporary!" => \$disable_temporary, "disable-checkpoint!" => \$disable_checkpoint, "disable-autovacuum!" 
=> \$disable_autovacuum,
-	"client=s"                 => \@dbclient2,    # Backward compatibility
 	"charset=s"                => \$charset,
 	"csv-separator=s"          => \$csv_sep_char,
+	"exclude-time=s"           => \@exclude_time,
 );
 die "FATAL: use pgbadger --help\n" if (not $result);

-push(@dbclient, @dbclient2); # Backward compatibility
-
 if ($ver) {
 	print "pgBadger version $VERSION\n";
 	exit 0;
@@ -423,6 +427,13 @@ if ($#exclude_query >= 0) {
 	}
 }

+# Testing regex syntax
+if ($#exclude_time >= 0) {
+	foreach my $r (@exclude_time) {
+		&check_regex($r, '--exclude-time');
+	}
+}
+
 # Loading included query from file if any
 if ($include_file) {
 	open(IN, "$include_file") or die "FATAL: can't read file $include_file: $!\n";
@@ -624,6 +635,7 @@ map {$_ = quotemeta($_)} @BRACKETS;

 # Where statistics are stored
 my %overall_stat = ();
+my %overall_checkpoint = ();
 my @top_slowest = ();
 my %normalyzed_info = ();
 my %error_info = ();
@@ -634,10 +646,11 @@
 my %tempfile_info = ();
 my %connection_info = ();
 my %database_info = ();
 my %application_info = ();
+my %user_info = ();
+my %host_info = ();
 my %session_info = ();
 my %conn_received = ();
 my %checkpoint_info = ();
-my %restartpoint_info = ();
 my %autovacuum_info = ();
 my %autoanalyze_info = ();
 my @graph_values = ();
@@ -650,6 +663,7 @@ our %saved_last_line = ();
 my %tsung_session = ();
 my @top_locked_info = ();
 my @top_tempfile_info = ();
+my %drawn_graphs = ();

 my $t0 = Benchmark->new;

@@ -664,7 +678,7 @@ if ($last_parsed && -e $last_parsed) {
 	}
 }
 $tmp_last_parsed = 'tmp_' . basename($last_parsed) if ($last_parsed);
-	
+
 # Main loop reading log files
 my $global_totalsize = 0;
@@ -836,11 +850,7 @@ if ($extension ne 'tsung') {
 		if (!$noprettify) {
 			$sql_prettified = SQL::Beautify->new(keywords => \@pg_keywords);
 		}
-		if ($error_only) {
-			&dump_error_as_html();
-		} else {
-			&dump_as_html();
-		}
+		&dump_as_html();
 	}
 	$fh->close;
 } else {
@@ -937,9 +947,9 @@ Options:
 	                         queries to include from the report. One regex per line.
 	--disable-error        : do not generate error report.
 	--disable-hourly       : do not generate hourly report.
-	--disable-type         : do not generate query type report.
+	--disable-type         : do not generate report of queries by type, database...
 	--disable-query        : do not generate query reports (slowest, most
-	                         frequent, ...).
+	                         frequent, queries by users, by database, ...).
 	--disable-session      : do not generate session report.
 	--disable-connection   : do not generate connection report.
 	--disable-lock         : do not generate lock report.
@@ -948,6 +958,9 @@
 	--disable-autovacuum   : do not generate autovacuum report.
 	--charset              : used to set the HTML charset to be used. Default: utf-8.
 	--csv-separator        : used to set the CSV field separator, default: ,
+	--exclude-time regex   : any timestamp matching the given regex will be
+	                         excluded from the report. Example: "2013-04-12 .*"
+	                         You can use this option multiple times.

 Examples:

@@ -985,6 +998,12 @@ Generate report every week using incremental behavior:

 This supposes that your log file and HTML report are also rotated every week.

+If you have a pg_dump at 23:00 and 13:00 each day during half an hour, you can
+use pgbadger as follows to exclude these periods from the report:
+
+    pgbadger --exclude-time "2013-09-.* (23|13):.*" postgresql.log
+
+This will help keep all those COPY statements from showing up at the top of
+the slowest queries report.
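+
+Options can be combined; for example (log file names are only illustrative),
+to skip a nightly maintenance window while parsing two log files with four
+parallel jobs:
+
+    pgbadger --exclude-time '2013-09-\d+ 0[0-5]:.*' -j 4 postgresql.log.1 postgresql.log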
}; exit 0; @@ -994,6 +1013,7 @@ sub init_stats_vars { # Empty where statistics are stored %overall_stat = (); + %overall_checkpoint = (); @top_slowest = (); @top_tempfile_info = (); @top_locked_info = (); @@ -1009,7 +1029,6 @@ sub init_stats_vars %session_info = (); %conn_received = (); %checkpoint_info = (); - %restartpoint_info = (); %autovacuum_info = (); %autoanalyze_info = (); @graph_values = (); @@ -1169,7 +1188,7 @@ sub process_file $prefix_vars{'t_hint'} = $row->[15]; $prefix_vars{'t_context'} = $row->[18]; $prefix_vars{'t_statement'} = $row->[19]; - + # Check if the log line should be excluded from the report if (&validate_log_line($prefix_vars{'t_pid'})) { @@ -1263,8 +1282,16 @@ sub process_file "$prefix_vars{'t_year'}-$prefix_vars{'t_month'}-$prefix_vars{'t_day'} $prefix_vars{'t_hour'}:$prefix_vars{'t_min'}:$prefix_vars{'t_sec'}"; # Skip unwanted lines + if ($#exclude_time >= 0) { + foreach (@exclude_time) { + if ($prefix_vars{'t_timestamp'} =~ /$_/) { + return; + } + } + } + next if ($from && ($from gt $prefix_vars{'t_timestamp'})); - if ($to && ($to lt $prefix_vars{'t_timestamp'})) { + if ($to && ($to lt $prefix_vars{'t_timestamp'})) { if ($tmpoutfile) { $pipe->print("$cursize " . ($overall_stat{'queries_number'} - $old_queries_count) . " " . ($overall_stat{'errors_number'} - $old_errors_count) . "\n"); $old_queries_count = $overall_stat{'queries_number'}; @@ -1296,16 +1323,25 @@ sub process_file } } elsif ($goon && ($line =~ $other_syslog_line)) { - $cur_pid = $8; my $t_query = $10; $t_query = $11 if ($format eq 'syslog-ng'); $t_query =~ s/#011/\t/g; next if ($t_query eq "\t"); + if ($cur_info{$cur_pid}{vacuum} && ($t_query =~ /^\t(pages|tuples|buffer usage|avg read rate|system usage):/)) { if ($t_query =~ /^\t(pages|tuples): (\d+) removed, (\d+) remain/) { $autovacuum_info{tables}{$cur_info{$cur_pid}{vacuum}}{$1}{removed} += $2; } + if ($t_query =~ m#^\tsystem usage: CPU .* sec elapsed (.*) sec#) { + if ($1 > $autovacuum_info{peak}{system_usage}{elapsed}) { + $autovacuum_info{peak}{system_usage}{elapsed} = $1; + $autovacuum_info{peak}{system_usage}{table} = $cur_info{$cur_pid}{vacuum}; + $autovacuum_info{peak}{system_usage}{date} = + "$cur_info{$cur_pid}{year}-$cur_info{$cur_pid}{month}-$cur_info{$cur_pid}{day} " . + "$cur_info{$cur_pid}{hour}:$cur_info{$cur_pid}{min}:$cur_info{$cur_pid}{sec}"; + } + } next; } elsif ( $cur_info{$cur_pid}{parameters} && (($t_query =~ /[,\s]*\$(\d+)\s=\s/) || ($t_query =~ /^('[^']*')$/)) ) { # stores bind parameters if any @@ -1368,8 +1404,15 @@ sub process_file $prefix_vars{'t_min'}, $prefix_vars{'t_sec'}) = ($prefix_vars{'t_timestamp'} =~ $time_pattern); # Skip unwanted lines + if ($#exclude_time >= 0) { + foreach (@exclude_time) { + if ($prefix_vars{'t_timestamp'} =~ /$_/) { + return; + } + } + } next if ($from && ($from gt $prefix_vars{'t_timestamp'})); - if ($to && ($to lt $prefix_vars{'t_timestamp'})) { + if ($to && ($to lt $prefix_vars{'t_timestamp'})) { if ($tmpoutfile) { $pipe->print("$cursize " . ($overall_stat{'queries_number'} - $old_queries_count) . " " . ($overall_stat{'errors_number'} - $old_errors_count) . 
"\n"); $old_queries_count = $overall_stat{'queries_number'}; @@ -1411,6 +1454,15 @@ sub process_file if ($line =~ /^\t(pages|tuples): (\d+) removed, (\d+) remain/) { $autovacuum_info{tables}{$cur_info{$cur_pid}{vacuum}}{$1}{removed} += $2; } + if ($line =~ m#^\tsystem usage: CPU .* sec elapsed (.*) sec#) { + if ($1 > $autovacuum_info{peak}{system_usage}{elapsed}) { + $autovacuum_info{peak}{system_usage}{elapsed} = $1; + $autovacuum_info{peak}{system_usage}{table} = $cur_info{$cur_pid}{vacuum}; + $autovacuum_info{peak}{system_usage}{date} = + "$cur_info{$cur_pid}{year}-$cur_info{$cur_pid}{month}-$cur_info{$cur_pid}{day} " . + "$cur_info{$cur_pid}{hour}:$cur_info{$cur_pid}{min}:$cur_info{$cur_pid}{sec}"; + } + } next; } elsif ( $cur_info{$cur_pid}{parameters} && (($line =~ /[,\s]*\$(\d+)\s=\s/) || ($line =~ /^'[^']*'$/)) ) { # stores bind parameters if any @@ -1450,7 +1502,7 @@ sub process_file # Get stats from all pending temporary storage foreach my $pid (sort {$cur_info{$a}{date} <=> $cur_info{$b}{date}} keys %cur_info) { - # Stores lat queries informations + # Stores last queries informations &store_queries($pid); } @@ -1508,7 +1560,7 @@ sub process_file &logmsg('ERROR', "can't save last parsed line into $tmp_last_parsed, $!"); } } - + return $getout; } @@ -1614,6 +1666,35 @@ sub comma_numbers return scalar reverse $text; } +# Format numbers with comma for better reading +sub pretty_print_size +{ + my $val = shift; + return 0 if (!$val); + + if ($val >= 1125899906842624) { + $val = ($val / 1125899906842624); + $val = sprintf("%0.2f", $val) . " PiB"; + } elsif ($val >= 1099511627776) { + $val = ($val / 1099511627776); + $val = sprintf("%0.2f", $val) . " TiB"; + } elsif ($val >= 1073741824) { + $val = ($val / 1073741824); + $val = sprintf("%0.2f", $val) . " GiB"; + } elsif ($val >= 1048576) { + $val = ($val / 1048576); + $val = sprintf("%0.2f", $val) . " MiB"; + } elsif ($val >= 1024) { + $val = ($val / 1024); + $val = sprintf("%0.2f", $val) . " KiB"; + } else { + $val = $val . " B"; + } + + return $val; +} + + # Format duration sub convert_time { @@ -1632,6 +1713,7 @@ sub convert_time $days = $days < 1 ? '' : $days . 'd'; $hours = $hours < 1 ? '' : $hours . 'h'; $minutes = $minutes < 1 ? '' : $minutes . 'm'; + $seconds =~ s/\.\d+$// if ($minutes); $time = $days . $hours . $minutes . $seconds . 's'; return $time; @@ -1711,8 +1793,7 @@ sub set_top_error_sample # Stop when we have our number of samples if (!exists $error_info{$q}{date} || ($#{$error_info{$q}{date}} < $sample)) { - if ( ($q =~ /deadlock detected/) || ($statement && !grep(/\Q$statement\E/, @{$error_info{$q}{statement}})) - || ($real_error && !grep(/\Q$real_error\E/, @{$error_info{$q}{error}})) ) { + if ( ($q =~ /deadlock detected/) || ($real_error && !grep(/\Q$real_error\E/, @{$error_info{$q}{error}})) ) { push(@{$error_info{$q}{date}}, $date); push(@{$error_info{$q}{detail}}, $detail); push(@{$error_info{$q}{context}}, $context); @@ -1738,7 +1819,7 @@ sub dump_as_text $logfile_str .= ', ..., ' . 
$log_files[-1];
 	}
 	print $fh qq{
-$report_title
+pgBadger :: $report_title

 - Global information ---------------------------------------------------

@@ -1749,9 +1830,9 @@ Log start from $overall_stat{'first_log_ts'} to $overall_stat{'last_log_ts'}
 };

 	# Overall statistics
-	my $fmt_unique   = &comma_numbers(scalar keys %normalyzed_info) || 0;
-	my $fmt_queries  = &comma_numbers($overall_stat{'queries_number'}) || 0;
-	my $fmt_duration = &convert_time($overall_stat{'queries_duration'}) || 0;
+	my $fmt_unique   = &comma_numbers(scalar keys %normalyzed_info);
+	my $fmt_queries  = &comma_numbers($overall_stat{'queries_number'});
+	my $fmt_duration = &convert_time($overall_stat{'queries_duration'});
 	$overall_stat{'first_query_ts'} ||= '-';
 	$overall_stat{'last_query_ts'} ||= '-';
 	print $fh qq{
@@ -1764,13 +1845,13 @@ Total query duration: $fmt_duration
 First query: $overall_stat{'first_query_ts'}
 Last query: $overall_stat{'last_query_ts'}
 };
-	foreach (sort {$overall_stat{'query_peak'}{$b} <=> $overall_stat{'query_peak'}{$a}} keys %{$overall_stat{'query_peak'}}) {
-		print $fh "Query peak: ", &comma_numbers($overall_stat{'query_peak'}{$_}), " queries/s at $_";
+	foreach (sort {$overall_stat{'peak'}{$b}{query} <=> $overall_stat{'peak'}{$a}{query}} keys %{$overall_stat{'peak'}}) {
+		print $fh "Query peak: ", &comma_numbers($overall_stat{'peak'}{$_}{query}), " queries/s at $_";
 		last;
 	}
 	if (!$disable_error) {
-		my $fmt_errors = &comma_numbers($overall_stat{'errors_number'}) || 0;
-		my $fmt_unique_error = &comma_numbers(scalar keys %{$overall_stat{'unique_normalized_errors'}}) || 0;
+		my $fmt_errors = &comma_numbers($overall_stat{'errors_number'});
+		my $fmt_unique_error = &comma_numbers(scalar keys %error_info);
 		print $fh qq{
 Number of events: $fmt_errors
 Number of unique normalized events: $fmt_unique_error
@@ -1794,6 +1875,10 @@ Average duration of sessions: $avg_session_duration
 	}
 	if (!$disable_connection && $connection_info{count}) {
 		print $fh "Total number of connections: $connection_info{count}\n";
+		foreach (sort {$overall_stat{'peak'}{$b}{connection} <=> $overall_stat{'peak'}{$a}{connection}} keys %{$overall_stat{'peak'}}) {
+			print $fh "Connection peak: ", &comma_numbers($overall_stat{'peak'}{$_}{connection}), " conn/s at $_";
+			last;
+		}
 	}
 	if (scalar keys %database_info > 1) {
 		print $fh "Total number of databases: ", scalar keys %database_info, "\n";
@@ -1850,6 +1935,35 @@ Report not supported by text format
 			}
 		}
 	}
+
+	# Show requests per user statistics
+	if (scalar keys %user_info > 1) {
+		print $fh "\n- Request per user ------------------------------------------------------\n\n";
+		print $fh "User Request type Count\n";
+		foreach my $d (sort keys %user_info) {
+			print $fh "$d - ", &comma_numbers($user_info{$d}{count}), "\n";
+			foreach my $r (sort keys %{$user_info{$d}}) {
+				next if ($r eq 'count');
+				print $fh "\t$r ", &comma_numbers($user_info{$d}{$r}), "\n";
+			}
+		}
+	}
+
+	# Show requests per host statistics
+	if (scalar keys %host_info > 1) {
+		print $fh "\n- Request per host ------------------------------------------------------\n\n";
+		print $fh "Host Request type Count\n";
+		foreach my $d (sort keys %host_info) {
+			print $fh "$d - ", &comma_numbers($host_info{$d}{count}), "\n";
+			foreach my $r (sort keys %{$host_info{$d}}) {
+				next if ($r eq 'count');
+				print $fh "\t$r ", &comma_numbers($host_info{$d}{$r}), "\n";
+			}
+		}
+	}
+

 	}

 	if (!$disable_lock && scalar keys %lock_info > 0) {
@@ -2196,7 +2310,7 @@ sub dump_error_as_text
 		$logfile_str .= ', ..., ' . 
$log_files[-1];
 	}
 	print $fh qq{
-$report_title
+pgBadger :: $report_title

 - Global information ---------------------------------------------------

@@ -2283,415 +2397,203 @@ sub show_error_as_text
 	}
 }

-sub get_page_style
-{
-	return qq{
-
-};
-
-}
-
-
 sub html_header
 {
 	my $date = localtime(time);
-	my $style = &get_page_style();
-	print $fh qq{
-
+	my $global_info = &print_global_information();
+
+	my @jscode = <DATA>;
+
+	print $fh qq{
+
-$report_title
+pgBadger :: $report_title
-$style
-};
-	if (!$nograph) {
-		my @jscode = <DATA>;
-		print $fh <<EOF;
-@jscode
-EOF
-	}
-	print $fh qq{
-
- -

$report_title

-}; - print $fh qq{ - + + +


+
+ +
    +}; } sub html_footer { print $fh qq{ -

     

    - + +
+
+ + - -
-
Table of contents
+
+
-}; - print $fh qq{ + }; @@ -2712,15 +2614,13 @@ sub print_global_information if ($#log_files > 0) { $logfile_str .= ', ..., ' . $log_files[-1]; } - print $fh qq{ -
+ return qq{
  • Generated on $curdate
  • Log file: $logfile_str
  • Parsed $fmt_nlines log entries in $total_time
  • Log start from $overall_stat{'first_log_ts'} to $overall_stat{'last_log_ts'}
-
}; } @@ -2728,259 +2628,548 @@ sub print_global_information sub print_overall_statistics { - my $fmt_unique = &comma_numbers(scalar keys %normalyzed_info) || 0; - my $fmt_queries = &comma_numbers($overall_stat{'queries_number'}) || 0; - my $fmt_duration = &convert_time($overall_stat{'queries_duration'}) || 0; + my $fmt_unique = &comma_numbers(scalar keys %normalyzed_info); + my $fmt_queries = &comma_numbers($overall_stat{'queries_number'}); + my $fmt_duration = &convert_time($overall_stat{'queries_duration'}); $overall_stat{'first_query_ts'} ||= '-'; $overall_stat{'last_query_ts'} ||= '-'; - print $fh qq{ -
-

Overall statistics ^

-
- + + + + + + + }; + $count = &comma_numbers($cur_period_info{'SELECT'}{count}); + $average = &convert_time($cur_period_info{'SELECT'}{average}); + $select_queries .= qq{ + + + + + + }; + my $insert_count = &comma_numbers($cur_period_info{'INSERT'}{count}); + my $update_count = &comma_numbers($cur_period_info{'UPDATE'}{count}); + my $delete_count = &comma_numbers($cur_period_info{'DELETE'}{count}); + my $write_average = &convert_time($write_average_duration / ($write_average_count || 1)); + $write_queries .= qq{ + + + + + + + + }; + my $prepare_count = &comma_numbers($cur_period_info{prepare}); + my $execute_count = &comma_numbers($cur_period_info{execute}); + my $bind_prepare = &comma_numbers(sprintf("%.2f", $cur_period_info{execute}/($cur_period_info{prepare}||1))); + my $prepare_usual = &comma_numbers(sprintf("%.2f", ($cur_period_info{prepare}/($cur_period_info{usual}||1)) * 100)) . "%"; + $prepared_queries .= qq{ + + + + + + + + }; + $count = &comma_numbers($connection_info{chronos}{"$d"}{"$h"}{count}); + $average = &comma_numbers(sprintf("%0.2f", $connection_info{chronos}{"$d"}{"$h"}{count} / 3600)); + $connections .= qq{ + + + + + + }; + $count = &comma_numbers($session_info{chronos}{"$d"}{"$h"}{count}); + $cur_period_info{'session'}{average} = + $session_info{chronos}{"$d"}{"$h"}{duration} / ($session_info{chronos}{"$d"}{"$h"}{count} || 1); + $average = &convert_time($cur_period_info{'session'}{average}); + $sessions .= qq{ + + + + + + }; + } + } + + # Set default values + $queries = qq{} if (!$queries); + $select_queries = qq{} if (!$select_queries); + $write_queries = qq{} if (!$write_queries); + $prepared_queries = qq{} if (!$prepared_queries); + $connections = qq{} if (!$connections); + $sessions = qq{} if (!$sessions); + + print $fh qq{ +
+

General Activity

+
-
    -
  • Number of unique normalized queries: $fmt_unique
  • -
  • Number of queries: $fmt_queries
  • -
  • Total query duration: $fmt_duration
  • -
  • First query: $overall_stat{'first_query_ts'}
  • -
  • Last query: $overall_stat{'last_query_ts'}
  • -}; - foreach (sort {$overall_stat{'query_peak'}{$b} <=> $overall_stat{'query_peak'}{$a}} keys %{$overall_stat{'query_peak'}}) { - print $fh "
  • Query peak: ", &comma_numbers($overall_stat{'query_peak'}{$_}), " queries/s at $_
  • "; + my $query_peak = 0; + my $query_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{query} <=> $overall_stat{'peak'}{$a}{query}} keys %{$overall_stat{'peak'}}) { + $query_peak = &comma_numbers($overall_stat{'peak'}{$_}{query}); + $query_peak_date = $_; last; } - if (!$disable_error) { - my $fmt_errors = &comma_numbers($overall_stat{'errors_number'}) || 0; - my $fmt_unique_error = &comma_numbers(scalar keys %{$overall_stat{'unique_normalized_errors'}}) || 0; - print $fh qq{ -
  • Number of events: $fmt_errors
  • -
  • Number of unique normalized events: $fmt_unique_error
  • -}; - } - if ($autovacuum_info{count}) { - print $fh qq{ -
  • Total number of automatic vacuums: $autovacuum_info{count}
  • -}; - } - if ($autoanalyze_info{count}) { - print $fh qq{ -
  • Total number of automatic analyzes: $autoanalyze_info{count}
  • -}; + my $fmt_errors = &comma_numbers($overall_stat{'errors_number'}); + my $fmt_unique_error = &comma_numbers(scalar keys %error_info); + my $autovacuum_count = &comma_numbers($autovacuum_info{count}); + my $autoanalyze_count = &comma_numbers($autoanalyze_info{count}); + my $tempfile_count = &comma_numbers($tempfile_info{count}); + my $fmt_temp_maxsise = &comma_numbers($tempfile_info{maxsize}); + my $fmt_temp_avsize = &comma_numbers(sprintf("%.2f", $tempfile_info{size} / ($tempfile_info{count} || 1))); + my $session_count = &comma_numbers($session_info{count}); + my $avg_session_duration = &convert_time($session_info{duration} / ($session_info{count} || 1)); + my $tot_session_duration = &convert_time($session_info{duration}); + my $connection_count = &comma_numbers($connection_info{count}); + my $connection_peak = 0; + my $connection_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{connection} <=> $overall_stat{'peak'}{$a}{connection}} keys %{$overall_stat{'peak'}}) { + $connection_peak = &comma_numbers($overall_stat{'peak'}{$_}{connection}); + $connection_peak_date = $_; + last; } + my $db_count = scalar keys %database_info; print $fh qq{ -
-
-
    -}; - if ($tempfile_info{count}) { - my $fmt_temp_maxsise = &comma_numbers($tempfile_info{maxsize}); - my $fmt_temp_avsize = &comma_numbers(sprintf("%.2f", $tempfile_info{size} / $tempfile_info{count})); - print $fh qq{ -
  • Number of temporary files: $tempfile_info{count}
  • -
  • Max size of temporary files: $fmt_temp_maxsise
  • -
  • Average size of temporary files: $fmt_temp_avsize
  • +

    Overview

    + +
    +

    Global Stats

    +
    + +
    +
    +
      +
    • $fmt_unique Number of unique normalized queries
    • +
    • $fmt_queries Number of queries
    • +
    • $fmt_duration Total query duration
    • +
    • $overall_stat{first_query_ts} First query
    • +
    • $overall_stat{last_query_ts} Last query
    • +
    • $query_peak queries/s at $query_peak_date Query peak
    • +
    +
    +
    +
      +
    • $fmt_errors Number of events
    • +
    • $fmt_unique_error Number of unique normalized events
    • +
    +
    +
    +
      +
    • $autovacuum_count Total number of automatic vacuums
    • +
    • $autoanalyze_count Total number of automatic analyzes
    • +
    +
    +
    +
      +
    • $tempfile_count Number of temporary files
    • +
    • $fmt_temp_maxsise Max size of temporary files
    • +
    • $fmt_temp_avsize Average size of temporary files
    • +
    +
    +
    +
      +
    • $session_count Total number of sessions
    • +
    • $tot_session_duration Total duration of sessions
    • +
    • $avg_session_duration Average duration of sessions
    • +
    +
    +
    +
      +
    • $connection_count Total number of connections
    • +
    • $connection_peak connections/s at $connection_peak_date Connection peak
    • +
    • $db_count Total number of databases
    • +
    +
    +
    +
    +
    }; - } - if (!$disable_session && $session_info{count}) { - my $avg_session_duration = &convert_time($session_info{duration} / $session_info{count}); - my $tot_session_duration = &convert_time($session_info{duration}); - print $fh qq{ -
  • Total number of sessions: $session_info{count}
  • -
  • Total duration of sessions: $tot_session_duration
  • -
  • Average duration of sessions: $avg_session_duration
  • + +} + +sub print_general_activity +{ + my $queries = ''; + my $select_queries = ''; + my $write_queries = ''; + my $prepared_queries = ''; + my $connections = ''; + my $sessions = ''; + foreach my $d (sort {$a <=> $b} keys %per_minute_info) { + my $c = 1; + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { + my %cur_period_info = (); + my $write_average_duration = 0; + my $write_average_count = 0; + foreach my $m (keys %{$per_minute_info{$d}{$h}}) { + $cur_period_info{count} += ($per_minute_info{$d}{$h}{$m}{query}{count} || 0); + $cur_period_info{duration} += ($per_minute_info{$d}{$h}{$m}{query}{duration} || 0); + $cur_period_info{min} = $per_minute_info{$d}{$h}{$m}{query}{duration} if (!exists $cur_period_info{min} || ($per_minute_info{$d}{$h}{$m}{query}{duration} < $cur_period_info{min})); + $cur_period_info{max} = $per_minute_info{$d}{$h}{$m}{query}{duration} if (!exists $cur_period_info{max} || ($per_minute_info{$d}{$h}{$m}{query}{duration} > $cur_period_info{max})); + foreach my $a (@SQL_ACTION) { + $cur_period_info{$a}{count} += ($per_minute_info{$d}{$h}{$m}{$a}{count} || 0); + $cur_period_info{$a}{duration} += ($per_minute_info{$d}{$h}{$m}{$a}{duration} || 0); + $cur_period_info{usual} += ($per_minute_info{$d}{$h}{$m}{$a}{count} || 0); + } + $cur_period_info{prepare} += ($per_minute_info{$d}{$h}{$m}{prepare} || 0); + $cur_period_info{execute} += ($per_minute_info{$d}{$h}{$m}{execute} || 0); + } + + $cur_period_info{average} = $cur_period_info{duration} / ($cur_period_info{count} || 1); + $cur_period_info{'SELECT'}{average} = $cur_period_info{'SELECT'}{duration} / ($cur_period_info{'SELECT'}{count} || 1); + $write_average_duration = ($cur_period_info{'INSERT'}{duration} + + $cur_period_info{'UPDATE'}{duration} + + $cur_period_info{'DELETE'}{duration}); + $write_average_count = ($cur_period_info{'INSERT'}{count} + + $cur_period_info{'UPDATE'}{count} + + $cur_period_info{'DELETE'}{count}); + $zday = " " if ($c > 1); + $c++; + + my $count = &comma_numbers($cur_period_info{count}); + my $min = &convert_time($cur_period_info{min}); + my $max = &convert_time($cur_period_info{max}); + my $average = &convert_time($cur_period_info{average}); + $queries .= qq{ +
$zday$h$count$min$max$average
$zday$h$count$average
$zday$h$insert_count$update_count$delete_count$write_average
$zday$h$prepare_count$execute_count$bind_prepare$prepare_usual
$zday$h$count$average/s
$zday$h$count$average
$NODATA
$NODATA
$NODATA
$NODATA
$NODATA
$NODATA
+ + + + + + + + + + + $queries + +
DayHourCountMin durationMax durationAvg duration
+
+
+ + + + + + + + + + $select_queries + +
DayHourCountAverage Duration
+
+
+ + + + + + + + + + + + $write_queries + +
DayHourINSERTUPDATEDELETEAverage Duration
+
+
+ + + + + + + + + + + + $prepared_queries + +
DayHourPrepareBindBind/PreparePercentage of prepare
+
+
+ + + + + + + + + + $connections + +
DayHourCountAverage / Second
+
+
+ + + + + + + + + + $sessions + +
DayHourCountAverage Duration
+
+
+ Back to the top of the General Activity table + + + }; + +} + +sub print_sql_traffic +{ + + my $bind_vs_prepared = sprintf("%.2f", $overall_stat{'execute'} / ($overall_stat{'prepare'} || 1)); + my $total_usual_queries = 0; + map { $total_usual_queries += $overall_stat{$_}; } @SQL_ACTION; + my $prepared_vs_normal = sprintf("%.2f", ($overall_stat{'execute'} / ($total_usual_queries || 1))*100); + + my $query_peak = 0; + my $query_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{query} <=> $overall_stat{'peak'}{$a}{query}} keys %{$overall_stat{'peak'}}) { + $query_peak = &comma_numbers($overall_stat{'peak'}{$_}{query}); + $query_peak_date = $_; + last; } - if (!$disable_connection && $connection_info{count}) { - print $fh qq{ -
  • Total number of connections: $connection_info{count}
  • -}; + + my $select_peak = 0; + my $select_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{select} <=> $overall_stat{'peak'}{$a}{select}} keys %{$overall_stat{'peak'}}) { + $select_peak = &comma_numbers($overall_stat{'peak'}{$_}{select}); + $select_peak_date = $_; + last; } - if (scalar keys %database_info > 1) { - my $db_count = scalar keys %database_info; - print $fh qq{ -
  • Total number of databases: $db_count
  • -}; + + my $write_peak = 0; + my $write_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{write} <=> $overall_stat{'peak'}{$a}{write}} keys %{$overall_stat{'peak'}}) { + $write_peak = &comma_numbers($overall_stat{'peak'}{$_}{write}); + $write_peak_date = $_; + last; } + my $fmt_duration = &convert_time($overall_stat{'queries_duration'}); print $fh qq{ - - - +
    +

    SQL Traffic

    +
    +

    Key values

    +
    +
      +
    • $query_peak queries/s Query Peak
    • +
    • $query_peak_date Date
    • +
    +
    +
    +
    +

    Queries per second ($avg_minutes minutes average)

    +$drawn_graphs{queriespersecond_graph} +
    +
    }; + delete $drawn_graphs{queriespersecond_graph}; -} + print $fh qq{ +
    +

    SELECT Traffic

    +
    +

    Key values

    +
    +
      +
    • $select_peak queries/s Query Peak
    • +
    • $select_peak_date Date
    • +
    +
    +
    +
    +

    SELECT queries per second ($avg_minutes minutes average)

    +$drawn_graphs{selectqueries_graph} +
    +
    +}; + delete $drawn_graphs{selectqueries_graph}; -sub print_hourly_reports -{ - if ($overall_stat{'queries_number'} || exists $connection_info{chronos}) { - print $fh qq{ -

    Hourly statistics ^

    + print $fh qq{ +
    +

    INSERT/UPDATE/DELETE Traffic

    +
    +

    Key values

    +
    +
      +
    • $write_peak queries/s Query Peak
    • +
    • $write_peak_date Date
    • +
    +
    +
    +
    +

    Write queries per second ($avg_minutes minutes average)

    +$drawn_graphs{writequeries_graph} +
    +
    }; - } - if ($overall_stat{'queries_number'}) { - print $fh qq{ - - - - - - - -}; - if (exists $connection_info{chronos}) { - print $fh " \n"; - } - if (exists $session_info{chronos}) { - print $fh " \n"; - } - print $fh qq{ - - - - - - - - - - + delete $drawn_graphs{writequeries_graph}; + + print $fh qq{ +
    +

    Queries duration

    +
    +

    Key values

    +
    +
      +
    • $fmt_duration Total query duration
    • +
    +
    +
    +
    +

    Average queries duration ($avg_minutes minutes average)

    +$drawn_graphs{durationqueries_graph} +
    +
    }; - if (exists $connection_info{chronos}) { - print $fh " \n"; - } - if (exists $session_info{chronos}) { - print $fh " \n"; - } - print $fh qq{ - + delete $drawn_graphs{durationqueries_graph}; + + print $fh qq{ +
    +

    Prepared queries ratio

    +
    +

    Key values

    +
    +
      +
    • $bind_vs_prepared Ratio of bind vs prepare
    • +
    • $prepared_vs_normal % Ratio between prepared and "usual" statements
    • +
    +
    +
    +
    +

    Ratio of bind vs prepare statements ($avg_minutes minutes average)

    +$drawn_graphs{bindpreparequeries_graph} +
    +
    }; + delete $drawn_graphs{bindpreparequeries_graph}; - foreach my $d (sort {$a <=> $b} keys %per_minute_info) { - my $c = 1; - $d =~ /^\d{4}(\d{2})(\d{2})$/; - my $zday = "$abbr_month{$1} $2"; - foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { - my %cur_period_info = (); - my $write_average_duration = 0; - my $write_average_count = 0; - foreach my $m (keys %{$per_minute_info{$d}{$h}}) { - $cur_period_info{count} += ($per_minute_info{$d}{$h}{$m}{query}{count} || 0); - $cur_period_info{duration} += ($per_minute_info{$d}{$h}{$m}{query}{duration} || 0); - $cur_period_info{min} = $per_minute_info{$d}{$h}{$m}{query}{duration} if (!exists $cur_period_info{min} || ($per_minute_info{$d}{$h}{$m}{query}{duration} < $cur_period_info{min})); - $cur_period_info{max} = $per_minute_info{$d}{$h}{$m}{query}{duration} if (!exists $cur_period_info{max} || ($per_minute_info{$d}{$h}{$m}{query}{duration} > $cur_period_info{max})); - foreach my $a (@SQL_ACTION) { - $cur_period_info{$a}{count} += ($per_minute_info{$d}{$h}{$m}{$a}{count} || 0); - $cur_period_info{$a}{duration} += ($per_minute_info{$d}{$h}{$m}{$a}{duration} || 0); - } - } +} - $cur_period_info{average} = $cur_period_info{duration} / ($cur_period_info{count} || 1); - $cur_period_info{'SELECT'}{average} = $cur_period_info{'SELECT'}{duration} / ($cur_period_info{'SELECT'}{count} || 1); - $write_average_duration = ($cur_period_info{'INSERT'}{duration} + - $cur_period_info{'UPDATE'}{duration} + - $cur_period_info{'DELETE'}{duration}); - $write_average_count = ($cur_period_info{'INSERT'}{count} + - $cur_period_info{'UPDATE'}{count} + - $cur_period_info{'DELETE'}{count}); - my $colb = $c % 2; - $zday = " " if ($c > 1); - my $write_average = $write_average_duration / ($write_average_count || 1); - print $fh ""; - if (exists $connection_info{chronos}) { - print $fh ""; - } - if (exists $session_info{chronos}) { - $cur_period_info{'session'}{average} = - $session_info{chronos}{"$d"}{"$h"}{duration} / ($session_info{chronos}{"$d"}{"$h"}{count} || 1); - print $fh ""; - } - print $fh "\n"; - $c++; - } - } +sub compute_query_graphs +{ + my %graph_data = (); + if ($graph) { + foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { + $tm =~ /(\d{4})(\d{2})(\d{2})/; + my $y = $1 - 1900; + my $mo = $2 - 1; + my $d = $3; + foreach my $h ("00" .. "23") { + next if (!exists $per_minute_info{$tm}{$h}); + my %q_dataavg = (); + my %a_dataavg = (); + my %c_dataavg = (); + my %p_dataavg = (); + foreach my $m ("00" .. "59") { + next if (!exists $per_minute_info{$tm}{$h}{$m}); - print $fh "
    DayHourQueriesSELECT queriesWrite queriesConnectionsSessions
    CountMin/Max/Avg duration CountAvg duration INSERTUPDATEDELETEAvg duration CountAvg/sCountAvg duration 
    $zday$h", - &comma_numbers($cur_period_info{count}), "", - &convert_time($cur_period_info{min}),"/",&convert_time($cur_period_info{max}),"/",&convert_time($cur_period_info{average}), "", - &comma_numbers($cur_period_info{'SELECT'}{count} || 0), "", - &convert_time($cur_period_info{'SELECT'}{average} || 0), "", - &comma_numbers($cur_period_info{'INSERT'}{count} || 0), "", - &comma_numbers($cur_period_info{'UPDATE'}{count} || 0), "", - &comma_numbers($cur_period_info{'DELETE'}{count} || 0), "", - &convert_time($write_average), "", &comma_numbers($connection_info{chronos}{"$d"}{"$h"}{count}), - "", - &comma_numbers(sprintf("%0.2f", $connection_info{chronos}{"$d"}{"$h"}{count} / 3600)), "/s", &comma_numbers($session_info{chronos}{"$d"}{"$h"}{count}), - "", &convert_time($cur_period_info{'session'}{average}), "
    \n"; + my $rd = &average_per_minutes($m, $avg_minutes); - if ($graph) { - my %graph_data = (); - foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { - $tm =~ /(\d{4})(\d{2})(\d{2})/; - my $y = $1 - 1900; - my $mo = $2 - 1; - my $d = $3; - foreach my $h ("00" .. "23") { - next if (!exists $per_minute_info{$tm}{$h}); - my %q_dataavg = (); - my %a_dataavg = (); - my %c_dataavg = (); - foreach my $m ("00" .. "59") { - next if (!exists $per_minute_info{$tm}{$h}{$m}); - - my $rd = &average_per_minutes($m, $avg_minutes); - - if (exists $per_minute_info{$tm}{$h}{$m}{query}) { + $p_dataavg{prepare}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{prepare} + if (exists $per_minute_info{$tm}{$h}{$m}{prepare}); + $p_dataavg{prepare}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{prepare} + if (exists $per_minute_info{$tm}{$h}{$m}{parse}); + $p_dataavg{execute}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{execute} + if (exists $per_minute_info{$tm}{$h}{$m}{execute}); - # Average per minute - $q_dataavg{count}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{query}{count}; - if (exists $per_minute_info{$tm}{$h}{$m}{query}{duration}) { - $q_dataavg{duration}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{query}{duration}; - } + if (exists $per_minute_info{$tm}{$h}{$m}{query}) { - # Search minimum and maximum during this minute - foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{query}{second}}) { - $q_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{query}{second}{$s} - if ($per_minute_info{$tm}{$h}{$m}{query}{second}{$s} > $q_dataavg{max}{"$rd"}); - $q_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{query}{second}{$s} - if (not exists $q_dataavg{min}{"$rd"} - || ($per_minute_info{$tm}{$h}{$m}{query}{second}{$s} < $q_dataavg{min}{"$rd"})); - } + # Average per minute + $q_dataavg{count}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{query}{count}; + if (exists $per_minute_info{$tm}{$h}{$m}{query}{duration}) { + $q_dataavg{duration}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{query}{duration}; + } - if (!$disable_query) { - foreach my $action (@SQL_ACTION) { - next if (!$per_minute_info{$tm}{$h}{$m}{$action}{count}); - $a_dataavg{$action}{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{count} || 0); - $a_dataavg{$action}{duration}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{duration} || 0); - if ( ($action ne 'SELECT') && exists $per_minute_info{$tm}{$h}{$m}{$action}{count}) { - $a_dataavg{write}{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{count} || 0); - $a_dataavg{write}{duration}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{duration} || 0); - } - } - } + # Search minimum and maximum during this minute + foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{query}{second}}) { + $q_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{query}{second}{$s} + if ($per_minute_info{$tm}{$h}{$m}{query}{second}{$s} > $q_dataavg{max}{"$rd"}); + $q_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{query}{second}{$s} + if (not exists $q_dataavg{min}{"$rd"} + || ($per_minute_info{$tm}{$h}{$m}{query}{second}{$s} < $q_dataavg{min}{"$rd"})); } - if (exists $per_minute_info{$tm}{$h}{$m}{connection}) { - # Average per minute - $c_dataavg{average}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{connection}{count}; - - # Search minimum and maximum during this minute - foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{connection}{second}}) { - $c_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} - if ($per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} > $c_dataavg{max}{"$rd"}); - 
$c_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} - if (not exists $c_dataavg{min}{"$rd"} - || ($per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} < $c_dataavg{min}{"$rd"})); + if (!$disable_query) { + foreach my $action (@SQL_ACTION) { + next if (!$per_minute_info{$tm}{$h}{$m}{$action}{count}); + $a_dataavg{$action}{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{count} || 0); + $a_dataavg{$action}{duration}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{duration} || 0); + if ( ($action ne 'SELECT') && exists $per_minute_info{$tm}{$h}{$m}{$action}{count}) { + $a_dataavg{write}{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{count} || 0); + $a_dataavg{write}{duration}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{$action}{duration} || 0); + } } - delete $per_minute_info{$tm}{$h}{$m}{connection}; } } + if (exists $per_minute_info{$tm}{$h}{$m}{connection}) { + + # Average per minute + $c_dataavg{average}{"$rd"} += $per_minute_info{$tm}{$h}{$m}{connection}{count}; + + # Search minimum and maximum during this minute + foreach my $s (keys %{$per_minute_info{$tm}{$h}{$m}{connection}{second}}) { + $c_dataavg{max}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} + if ($per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} > $c_dataavg{max}{"$rd"}); + $c_dataavg{min}{"$rd"} = $per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} + if (not exists $c_dataavg{min}{"$rd"} + || ($per_minute_info{$tm}{$h}{$m}{connection}{second}{$s} < $c_dataavg{min}{"$rd"})); + } + delete $per_minute_info{$tm}{$h}{$m}{connection}; + } + } - foreach my $rd (@avgs) { - my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; + foreach my $rd (@avgs) { + my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; - next if ($t < $t_min); - last if ($t > $t_max); + next if ($t < $t_min); + last if ($t > $t_max); + if (exists $q_dataavg{count}) { # Average queries per minute $graph_data{query2} .= "[$t, " . int(($q_dataavg{count}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; # Maxi queries per minute @@ -2989,225 +3178,600 @@ sub print_hourly_reports $graph_data{query3} .= "[$t, " . ($q_dataavg{min}{"$rd"} || 0) . "],"; # Average duration per minute $graph_data{query4} .= "[$t, " . sprintf("%.3f", ($q_dataavg{duration}{"$rd"} || 0) / ($q_dataavg{count}{"$rd"} || 1)) . "],"; - if (scalar keys %c_dataavg) { - # Average connections per minute - $graph_data{conn2} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . "],"; - # Maxi connections per minute - $graph_data{conn1} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],"; - - # Mini connections per minute - $graph_data{conn3} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],"; - } - if (!$disable_query && (scalar keys %a_dataavg> 0)) { - foreach my $action (@SQL_ACTION) { - next if ($select_only && ($action ne 'SELECT')); - # Average per minute - $graph_data{"$action"} .= "[$t, " . ($a_dataavg{$action}{count}{"$rd"} || 0) . "],"; - if ($action eq 'SELECT') { - $graph_data{"$action-2"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{$action}{duration}{"$rd"} || 0) / ($a_dataavg{$action}{count}{"$rd"} || 1)) . "]," if ($action eq 'SELECT'); - } else { - $graph_data{"write"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{write}{duration}{"$rd"} || 0) / ($a_dataavg{write}{count}{"$rd"} || 1)) . "],"; - } + } + if (scalar keys %c_dataavg) { + # Average connections per minute + $graph_data{conn2} .= "[$t, " . int(($c_dataavg{average}{"$rd"} || 0) / (60 * $avg_minutes)) . 
"],"; + # Maxi connections per minute + $graph_data{conn1} .= "[$t, " . ($c_dataavg{max}{"$rd"} || 0) . "],"; + + # Mini connections per minute + $graph_data{conn3} .= "[$t, " . ($c_dataavg{min}{"$rd"} || 0) . "],"; + } + if (!$disable_query && (scalar keys %a_dataavg> 0)) { + foreach my $action (@SQL_ACTION) { + next if ($select_only && ($action ne 'SELECT')); + # Average per minute + $graph_data{"$action"} .= "[$t, " . ($a_dataavg{$action}{count}{"$rd"} || 0) . "],"; + if ($action eq 'SELECT') { + $graph_data{"$action-2"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{$action}{duration}{"$rd"} || 0) / ($a_dataavg{$action}{count}{"$rd"} || 1)) . "]," if ($action eq 'SELECT'); + } else { + $graph_data{"write"} .= "[$t, " . sprintf("%.3f", ($a_dataavg{write}{duration}{"$rd"} || 0) / ($a_dataavg{write}{count}{"$rd"} || 1)) . "],"; } } } + if (!$disable_query && (scalar keys %p_dataavg> 0)) { + $graph_data{prepare} .= "[$t, " . ($p_dataavg{prepare}{"$rd"} || 0) . "],"; + $graph_data{execute} .= "[$t, " . ($p_dataavg{execute}{"$rd"} || 0) . "],"; + $graph_data{ratio_bind_prepare} .= "[$t, " . sprintf("%.2f", ($p_dataavg{execute}{"$rd"} || 0) / ($p_dataavg{prepare}{"$rd"} || 1)) . "],"; + } } } - foreach (keys %graph_data) { - $graph_data{$_} =~ s/,$//; - } - &flotr2_graph( $graphid++, 'queriespersecond_graph', $graph_data{query1}, - $graph_data{query2}, $graph_data{query3}, 'Queries per second (' . $avg_minutes . ' minutes average)', - 'Queries per second', 'Maximum', 'Average', 'Minimum' - ); - - if (exists $graph_data{conn1}) { - &flotr2_graph( $graphid++, 'connectionspersecond_graph', $graph_data{conn1}, - $graph_data{conn2}, $graph_data{conn3}, 'Connections per second (' . $avg_minutes . ' minutes average)', - 'Connections per second', 'Maximum', 'Average', 'Minimum' - ); - } - - if (!$disable_query) { - &flotr2_graph( $graphid++, 'selectqueries_graph', $graph_data{"SELECT"}, '', '', - 'SELECT queries (' . $avg_minutes . ' minutes period)', - 'Queries', 'Number of queries', '', '' - ) if ($graph_data{"SELECT"}); - # Write queries - if (!$select_only && $graph_data{"write"}) { - &flotr2_graph( - $graphid++, 'writequeries_graph', $graph_data{"DELETE"}, $graph_data{"INSERT"}, $graph_data{"UPDATE"}, 'Write queries (' . $avg_minutes . ' minutes period)', - 'Queries', 'DELETE queries', 'INSERT queries', 'UPDATE queries' - ); - } - } - &flotr2_graph( - $graphid++, 'queriesduration_graph', $graph_data{query4}, $graph_data{"SELECT-2"}, $graph_data{write}, 'Average queries duration (' . $avg_minutes . ' minutes average)', - 'Duration', 'All queries', 'Select queries', 'Write queries' - ); } + foreach (keys %graph_data) { + $graph_data{$_} =~ s/,$//; + } + } + $drawn_graphs{'queriespersecond_graph'} = &flotr2_graph( $graphid++, 'queriespersecond_graph', $graph_data{query1}, + $graph_data{query2}, $graph_data{query3}, 'Queries per second (' . $avg_minutes . ' minutes average)', + 'Queries per second', 'Maximum', 'Average', 'Minimum' + ); + + $drawn_graphs{'connectionspersecond_graph'} = &flotr2_graph( $graphid++, 'connectionspersecond_graph', $graph_data{conn1}, + $graph_data{conn2}, $graph_data{conn3}, 'Connections per second (' . $avg_minutes . ' minutes average)', + 'Connections per second', 'Maximum', 'Average', 'Minimum' + ); + + $drawn_graphs{'selectqueries_graph'} = &flotr2_graph( $graphid++, 'selectqueries_graph', $graph_data{"SELECT"}, '', '', + 'SELECT queries (' . $avg_minutes . 
' minutes period)', + 'Queries', 'Number of queries', '', '' + ); + $drawn_graphs{'writequeries_graph'} = &flotr2_graph( + $graphid++, 'writequeries_graph', $graph_data{"DELETE"}, $graph_data{"INSERT"}, $graph_data{"UPDATE"}, 'Write queries (' . $avg_minutes . ' minutes period)', + 'Queries', 'DELETE queries', 'INSERT queries', 'UPDATE queries' + ); + if (!$select_only) { + $drawn_graphs{'durationqueries_graph'} = &flotr2_graph( + $graphid++, 'durationqueries_graph', $graph_data{query4}, $graph_data{"SELECT-2"}, $graph_data{write}, 'Average queries duration (' . $avg_minutes . ' minutes average)', + 'Duration', 'All queries', 'Select queries', 'Write queries' + ); + } else { + $drawn_graphs{'durationqueries_graph'} = &flotr2_graph( + $graphid++, 'durationqueries_graph', $graph_data{query4}, '', '', 'Average queries duration (' . $avg_minutes . ' minutes average)', + 'Duration', 'Select queries' + ); + } + + $drawn_graphs{'bindpreparequeries_graph'} = &flotr2_graph( + $graphid++, 'bindpreparequeries_graph', $graph_data{prepare}, $graph_data{"execute"}, $graph_data{ratio_bind_prepare}, 'Bind versus prepare statements (' . $avg_minutes . ' minutes average)', + 'Number of statements', 'Prepare/Parse', 'Execute/Bind', 'Bind vs prepare' + ); + +} + +sub print_simultaneous_connection +{ + + my $connection_peak = 0; + my $connection_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{connection} <=> $overall_stat{'peak'}{$a}{connection}} keys %{$overall_stat{'peak'}}) { + $connection_peak = &comma_numbers($overall_stat{'peak'}{$_}{connection}); + $connection_peak_date = $_; + last; } + print $fh qq{ +

    Connections

    + +
    +

    Simultaneous Connections

    +
    +

    Key values

    +
    +
      +
    • $connection_peak connections Connection Peak
    • +
    • $connection_peak_date Date
    • +
    +
    +
    +
    +

    Connections per second ($avg_minutes minutes average)

    +$drawn_graphs{connectionspersecond_graph} +
    +
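
The "Connection Peak" key value above is produced by sorting the per-timestamp peak hash in descending order and keeping only the first entry; the checkpoint, WAL-file and temporary-file peaks later in the report use the same idiom. A minimal sketch of the pattern, with find_peak as a hypothetical helper (not part of pgbadger):

    sub find_peak
    {
        my ($peak, $key) = @_;
        # First key of the descending numeric sort holds the maximum value
        foreach my $date (sort { $peak->{$b}{$key} <=> $peak->{$a}{$key} } keys %$peak) {
            return ($peak->{$date}{$key}, $date);
        }
        return (0, '');
    }

    # e.g. my ($count, $date) = &find_peak($overall_stat{'peak'}, 'connection');
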
    +}; + delete $drawn_graphs{connectionspersecond_graph}; + } -sub print_checkpoint_tempfile_vacuum_reports +sub print_user_connection { - if (scalar keys %per_minute_info > 0) { + my %infos = (); + my $total_count = 0; + my $c = 0; + my $conn_user_info = ''; + my @main_user = ('unknown',0); + foreach my $u (sort keys %{$connection_info{user}}) { + $conn_user_info .= "$u" . + &comma_numbers($connection_info{user}{$u}) . ""; + $total_count += $connection_info{user}{$u}; + if ($main_user[1] < $connection_info{user}{$u}) { + $main_user[0] = $u; + $main_user[1] = $connection_info{user}{$u}; + } + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$connection_info{user}}) { + if ((($connection_info{user}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $connection_info{user}{$d} || 0; + } else { + $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{user}{$d} || 0; + push(@small, $d); + } + } + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; + delete $infos{"Sum connections < $pie_percentage_limit%"}; + } + } + $drawn_graphs{userconnections_graph} = &flotr2_piegraph($graphid++, 'userconnections_graph', 'Connections per user', %infos); + $total_count = &comma_numbers($total_count); + print $fh qq{ +
    +

    Connections per user

    +
    +

    Key values

    +
    +
      +
    • $main_user[0] Main User
    • +
    • $total_count connections Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{userconnections_graph} +
    +
    + + + + + + + + + $conn_user_info + +
User | Count
    +
    +
    +
    +
    +
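
The pie data for this report is built with a grouping pattern that recurs in every print_*_connection and print_*_session subroutine: slices whose share does not exceed $pie_percentage_limit are folded into a single "Sum ... < limit%" slice, and when exactly one item was folded it gets its real label back. A generalized sketch, with group_small_slices as a hypothetical helper (not part of pgbadger):

    sub group_small_slices
    {
        my ($counts, $limit, $label) = @_;
        my $total = 0;
        $total += $_ for values %$counts;
        my (%slices, @small);
        foreach my $k (sort keys %$counts) {
            if ((($counts->{$k} * 100) / ($total || 1)) > $limit) {
                $slices{$k} = $counts->{$k};
            } else {
                # Fold small slices into one summary entry
                $slices{$label} += $counts->{$k};
                push(@small, $k);
            }
        }
        # A lone folded slice keeps its own name
        $slices{$small[0]} = delete $slices{$label} if ($#small == 0);
        return %slices;
    }
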
    +}; + delete $drawn_graphs{userconnections_graph}; +} - print $fh qq{}; - if ($tempfile_info{count}) { - print $fh qq{}; +sub print_host_connection +{ + my %infos = (); + my $total_count = 0; + my $c = 0; + my $conn_host_info = ''; + my @main_host = ('unknown',0); + foreach my $h (sort keys %{$connection_info{host}}) { + $conn_host_info .= ""; + $total_count += $connection_info{host}{$h}; + if ($main_host[1] < $connection_info{host}{$h}) { + $main_host[0] = $h; + $main_host[1] = $connection_info{host}{$h}; } - if ($checkpoint_info{wbuffer}) { - print $fh qq{}; + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$connection_info{host}}) { + if ((($connection_info{host}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $connection_info{host}{$d} || 0; + } else { + $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{host}{$d} || 0; + push(@small, $d); + } + } + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; + delete $infos{"Sum connections < $pie_percentage_limit%"}; + } + } + $drawn_graphs{hostconnections_graph} = &flotr2_piegraph($graphid++, 'hostconnections_graph', 'Connections per host', %infos); + $total_count = &comma_numbers($total_count); + print $fh qq{ +
    +

    Connections per host

    +
    +

    Key values

    +
    +
      +
    • $main_host[0] Main Host
    • +
    • $total_count connections Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{hostconnections_graph} +
    +
    +
    DayHourTemporary files
    $h" . + &comma_numbers($connection_info{host}{$h}) . "
    Checkpoints
    + + + + + + + + $conn_host_info + +
Host | Count
    + + + + + +}; + + delete $drawn_graphs{hostconnections_graph}; +} + +sub print_database_connection +{ + my %infos = (); + my $total_count = 0; + my $conn_database_info = ''; + my @main_database = ('unknown',0); + foreach my $d (sort keys %{$connection_info{database}}) { + $conn_database_info .= "$d " . + &comma_numbers($connection_info{database}{$d}) . ""; + $total_count += $connection_info{database}{$d}; + if ($main_database[1] < $connection_info{database}{$d}) { + $main_database[0] = $d; + $main_database[1] = $connection_info{database}{$d}; } - if ($checkpoint_info{warning}) { - print $fh qq{Checkpoint warning}; + foreach my $u (sort keys %{$connection_info{user}}) { + next if (!exists $connection_info{database_user}{$d}{$u}); + $conn_database_info .= " $u" . + &comma_numbers($connection_info{database_user}{$d}{$u}) . ""; } - if ($restartpoint_info{wbuffer}) { - print $fh qq{Restartpoints}; + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$connection_info{database}}) { + if ((($connection_info{database}{$d} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $connection_info{database}{$d} || 0; + } else { + $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{database}{$d} || 0; + push(@small, $d); + } } - if (exists $autovacuum_info{chronos}) { - print $fh " Autovacuum\n"; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; + delete $infos{"Sum connections < $pie_percentage_limit%"}; } - if ($tempfile_info{count} || $checkpoint_info{wbuffer} || $restartpoint_info{wbuffer}) { - print $fh qq{}; + } + $drawn_graphs{databaseconnections_graph} = &flotr2_piegraph($graphid++, 'databaseconnections_graph', 'Connections per database', %infos); + $total_count = &comma_numbers($total_count); + print $fh qq{ +
    +

    Connections per database

    +
    +

    Key values

    +
    +
      +
    • $main_database[0] Main Database
    • +
    • $total_count connections Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{databaseconnections_graph} +
    +
    + + + + + + + + + + $conn_database_info + +
Database | User | Count
    +
    +
    +
    +
    +
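
For reference, the nested layout these connection reports read from looks like the following (values are illustrative only): flat counters per user, host and database, plus a two-level database_user hash that feeds the per-database user rows above:

    my %connection_info_example = (
        user          => { 'app_user' => 340, 'postgres' => 12 },
        host          => { '10.0.0.1' => 200, '10.0.0.2' => 152 },
        database      => { 'mydb'     => 352 },
        database_user => { 'mydb' => { 'app_user' => 340, 'postgres' => 12 } },
    );
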
    +}; + delete $drawn_graphs{databaseconnections_graph}; +} + +sub print_user_session +{ + my %infos = (); + my $total_count = 0; + my $c = 0; + my $sess_user_info = ''; + my @main_user = ('unknown',0); + foreach my $u (sort keys %{$session_info{user}}) { + $sess_user_info .= "$u" . &comma_numbers($session_info{user}{$u}{count}) . + "" . &convert_time($session_info{user}{$u}{duration}), "" . + &convert_time($session_info{user}{$u}{duration} / $session_info{user}{$u}{count}) . + ""; + $total_count += $session_info{user}{$u}{count}; + if ($main_user[1] < $session_info{user}{$u}{count}) { + $main_user[0] = $u; + $main_user[1] = $session_info{user}{$u}{count}; } - if ($tempfile_info{count}) { - print $fh qq{CountAvg size}; + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$session_info{user}}) { + if ((($session_info{user}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $session_info{user}{$d}{count} || 0; + } else { + $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{user}{$d}{count} || 0; + push(@small, $d); + } } - if ($checkpoint_info{wbuffer}) { - print $fh - qq{Written buffersAddedRemovedRecycledWrite time (sec)Sync time (sec)Total time (sec)}; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; + delete $infos{"Sum sessions < $pie_percentage_limit%"}; } - if ($checkpoint_info{warning}) { - print $fh qq{CountAvg time (sec)}; + } + $drawn_graphs{usersessions_graph} = &flotr2_piegraph($graphid++, 'usersessions_graph', 'Connections per user', %infos); + $sess_user_info = qq{$NODATA} if (!$total_count); + $total_count = &comma_numbers($total_count); + print $fh qq{ +
    +

    Sessions per user

    +
    +

    Key values

    +
    +
      +
    • $main_user[0] Main User
    • +
    • $total_count sessions Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{usersessions_graph} +
    +
    + + + + + + + + + + + $sess_user_info + +
User | Count | Total Duration | Average Duration
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{usersessions_graph}; +} + +sub print_host_session +{ + my %infos = (); + my $total_count = 0; + my $c = 0; + my $sess_host_info = ''; + my @main_host = ('unknown',0); + foreach my $h (sort keys %{$session_info{host}}) { + $sess_host_info .= "$h" . &comma_numbers($session_info{host}{$h}{count}) . + "" . &convert_time($session_info{host}{$h}{duration}) . "" . + &convert_time($session_info{host}{$h}{duration} / $session_info{host}{$h}{count}) . + ""; + $total_count += $session_info{host}{$h}{count}; + if ($main_host[1] < $session_info{host}{$h}{count}) { + $main_host[0] = $h; + $main_host[1] = $session_info{host}{$h}{count}; } - if ($restartpoint_info{wbuffer}) { - print $fh - qq{Written buffersWrite time (sec)Sync time (sec)Total time (sec)}; + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$session_info{host}}) { + if ((($session_info{host}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $session_info{host}{$d}{count} || 0; + } else { + $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{host}{$d}{count} || 0; + push(@small, $d); + } } - if (exists $autovacuum_info{chronos}) { - print $fh " VACUUMsANALYZEs\n"; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; + delete $infos{"Sum sessions < $pie_percentage_limit%"}; } - if ($tempfile_info{count} || $checkpoint_info{wbuffer} || $restartpoint_info{wbuffer}) { - print $fh qq{}; - foreach my $d (sort {$a <=> $b} keys %per_minute_info) { - my $c = 1; - $d =~ /^\d{4}(\d{2})(\d{2})$/; - my $zday = "$abbr_month{$1} $2"; - foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { - my $colb = $c % 2; - $zday = " " if ($c > 1); - print $fh "$zday$h"; - my %tinf = (); - my %cinf = (); - my %rinf = (); - my %ainf = (); - foreach my $m (keys %{$per_minute_info{$d}{$h}}) { + } + $drawn_graphs{hostsessions_graph} = &flotr2_piegraph($graphid++, 'hostsessions_graph', 'Connections per host', %infos); + $sess_host_info = qq{$NODATA} if (!$total_count); + $total_count = &comma_numbers($total_count); + print $fh qq{ +
    +

    Sessions per host

    +
    +

    Key values

    +
    +
      +
    • $main_host[0] Main Host
    • +
    • $total_count sessions Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{hostsessions_graph} +
    +
    + + + + + + + + + + + $sess_host_info + +
Host | Count | Total Duration | Average Duration
    +
    +
    +
    +
    +
    +}; - if (exists $per_minute_info{$d}{$h}{$m}{tempfile}) { - $tinf{size} += $per_minute_info{$d}{$h}{$m}{tempfile}{size}; - $tinf{count} += $per_minute_info{$d}{$h}{$m}{tempfile}{count}; - } - if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}) { - $cinf{wbuffer} += $per_minute_info{$d}{$h}{$m}{checkpoint}{wbuffer}; - $cinf{file_added} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_added}; - $cinf{file_removed} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_removed}; - $cinf{file_recycled} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_recycled}; - $cinf{write} += $per_minute_info{$d}{$h}{$m}{checkpoint}{write}; - $cinf{sync} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync}; - $cinf{total} += $per_minute_info{$d}{$h}{$m}{checkpoint}{total}; - } - if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}{warning}) { - $cinf{warning} += $per_minute_info{$d}{$h}{$m}{checkpoint}{warning}; - $cinf{warning_seconds} += $per_minute_info{$d}{$h}{$m}{checkpoint}{warning_seconds}; - } - if (exists $per_minute_info{$d}{$h}{$m}{restartpoint}) { - $rinf{wbuffer} += $per_minute_info{$d}{$h}{$m}{restartpoint}{wbuffer}; - $rinf{write} += $per_minute_info{$d}{$h}{$m}{restartpoint}{write}; - $rinf{sync} += $per_minute_info{$d}{$h}{$m}{restartpoint}{sync}; - $rinf{total} += $per_minute_info{$d}{$h}{$m}{restartpoint}{total}; - } - if (exists $per_minute_info{$d}{$h}{$m}{autovacuum}{count}) { - $ainf{vcount} += $per_minute_info{$d}{$h}{$m}{autovacuum}{count}; - } - if (exists $per_minute_info{$d}{$h}{$m}{autoanalyze}{count}) { - $ainf{acount} += $per_minute_info{$d}{$h}{$m}{autoanalyze}{count}; - } + delete $drawn_graphs{hostsessions_graph}; +} - } - if ($tempfile_info{count}) { - if (scalar keys %tinf) { - my $temp_average = &comma_numbers(sprintf("%.2f", $tinf{size} / $tinf{count})); - print $fh "", &comma_numbers($tinf{count}), - "$temp_average"; - } else { - print $fh "00"; - } - } - if ($checkpoint_info{wbuffer}) { - if (scalar keys %cinf) { - print $fh "", &comma_numbers($cinf{wbuffer}), - "", &comma_numbers($cinf{file_added}), - "", &comma_numbers($cinf{file_removed}), - "", &comma_numbers($cinf{file_recycled}), - "", &comma_numbers($cinf{write}), - "", &comma_numbers($cinf{sync}), - "", &comma_numbers($cinf{total}), - ""; - } else { - print $fh - "0000000"; - } - } - if ($checkpoint_info{warning}) { - if (exists $cinf{warning}) { - print $fh "", &comma_numbers($cinf{warning}), - "", - &comma_numbers(sprintf( "%.2f", ($cinf{warning_seconds} || 0) / ($cinf{warning} || 1))), ""; - } else { - print $fh "00"; - } - } - if ($restartpoint_info{wbuffer}) { - if (scalar keys %rinf) { - print $fh "", &comma_numbers($rinf{wbuffer}), - "", &comma_numbers($rinf{write}), - "", &comma_numbers($rinf{sync}), - "", &comma_numbers($rinf{total}), - ""; - } else { - print $fh "00", - "0"; - } - } - if ($autovacuum_info{count} > 0) { - if (scalar keys %ainf) { - print $fh "", &comma_numbers($ainf{vcount}), ""; - } else { - print $fh "0"; - } - } - if ($autoanalyze_info{count} > 0) { - if (scalar keys %ainf) { - print $fh "", &comma_numbers($ainf{acount}), ""; - } else { - print $fh "0"; - } - } - print $fh "\n"; - $c++; - } +sub print_database_session +{ + my %infos = (); + my $total_count = 0; + my $sess_database_info = ''; + my @main_database = ('unknown',0); + foreach my $d (sort keys %{$session_info{database}}) { + $sess_database_info .= "$d" . &comma_numbers($session_info{database}{$d}{count}) . + "" . &convert_time($session_info{database}{$d}{duration}) . "" . 
+ &convert_time($session_info{database}{$d}{duration} / $session_info{database}{$d}{count}) . + ""; + $total_count += $session_info{database}{$d}{count}; + if ($main_database[1] < $session_info{database}{$d}{count}) { + $main_database[0] = $d; + $main_database[1] = $session_info{database}{$d}{count}; + } + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$session_info{database}}) { + if ((($session_info{database}{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $session_info{database}{$d}{count} || 0; + } else { + $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{database}{$d}{count} || 0; + push(@small, $d); } - print $fh "\n"; + } + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; + delete $infos{"Sum sessions < $pie_percentage_limit%"}; } } + $drawn_graphs{databasesessions_graph} = &flotr2_piegraph($graphid++, 'databasesessions_graph', 'Connections per database', %infos); + $sess_database_info = qq{$NODATA} if (!$total_count); + + $total_count = &comma_numbers($total_count); + print $fh qq{ +

    Sessions

    + +
    +

    Sessions per database

    +
    +

    Key values

    +
    +
      +
    • $main_database[0] Main Database
    • +
    • $total_count sessions Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{databasesessions_graph} +
    +
    + + + + + + + + + + + + $sess_database_info + +
Database | User | Count | Total Duration | Average Duration
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{databasesessions_graph}; +} + +sub print_checkpoint +{ + # checkpoint + my %graph_data = (); if ($graph) { - # checkpoint - my %graph_data = (); foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { $tm =~ /(\d{4})(\d{2})(\d{2})/; my $y = $1 - 1900; @@ -3231,23 +3795,6 @@ sub print_checkpoint_tempfile_vacuum_reports $chk_dataavg{file_recycled}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{checkpoint}{file_recycled} || 0); } } - if ($restartpoint_info{wbuffer}) { - if (exists $per_minute_info{$tm}{$h}{$m}{restartpoint}) { - $chk_dataavg{rwbuffer}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{restartpoint}{wbuffer} || 0); - } - } - if ($tempfile_info{count}) { - if (exists $per_minute_info{$tm}{$h}{$m}{tempfile}) { - $t_dataavg{size}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{tempfile}{size} || 0); - $t_dataavg{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{tempfile}{count} || 0); - } - } - if (exists $per_minute_info{$tm}{$h}{$m}{autovacuum}) { - $v_dataavg{vcount}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{autovacuum}{count} || 0); - } - if (exists $per_minute_info{$tm}{$h}{$m}{autoanalyze}) { - $v_dataavg{acount}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{autoanalyze}{count} || 0); - } } foreach my $rd (@avgs) { @@ -3256,646 +3803,1458 @@ sub print_checkpoint_tempfile_vacuum_reports next if ($t < $t_min); last if ($t > $t_max); - # Average of written buffers + # Average of written checkpoints buffers and wal files if (exists $chk_dataavg{wbuffer}) { $graph_data{wbuffer} .= "[$t, " . ($chk_dataavg{wbuffer}{"$rd"} || 0) . "],"; $graph_data{file_added} .= "[$t, " . ($chk_dataavg{file_added}{"$rd"} || 0) . "],"; $graph_data{file_removed} .= "[$t, " . ($chk_dataavg{file_removed}{"$rd"} || 0) . "],"; $graph_data{file_recycled} .= "[$t, " . ($chk_dataavg{file_recycled}{"$rd"} || 0) . "],"; } - if (exists $chk_dataavg{rwbuffer}) { - $graph_data{rwbuffer} .= "[$t, " . ($chk_dataavg{rwbuffer}{"$rd"} || 0) . "],"; - } - if (exists $t_dataavg{size}) { - $graph_data{size} .= "[$t, " . ($t_dataavg{size}{"$rd"} || 0) . "],"; - $graph_data{count} .= "[$t, " . ($t_dataavg{count}{"$rd"} || 0) . "],"; - } - if (exists $v_dataavg{vcount}) { - $graph_data{vcount} .= "[$t, " . ($v_dataavg{vcount}{"$rd"} || 0) . "],"; - } - if (exists $v_dataavg{acount}) { - $graph_data{acount} .= "[$t, " . ($v_dataavg{acount}{"$rd"} || 0) . "],"; - } - } } } foreach (keys %graph_data) { $graph_data{$_} =~ s/,$//; } - if (exists $graph_data{wbuffer}) { - &flotr2_graph( - $graphid++, 'checkpointwritebuffers_graph', $graph_data{wbuffer}, '', '', 'Checkpoint write buffers (' . $avg_minutes . ' minutes period)', - 'Buffers', 'Write buffers', '', '' - ); - - &flotr2_graph( - $graphid++, 'checkpointfiles_graph', $graph_data{file_added}, $graph_data{file_removed}, $graph_data{file_recycled}, - 'Checkpoint Wal files usage', 'Number of files', 'Added', 'Removed', 'Recycled' - ); - } - # Restart point - if ($restartpoint_info{wbuffer} && $graph_data{rwbuffer}) { - &flotr2_graph( - $graphid++, 'restartpointwritebuffers_graph', $graph_data{rwbuffer}, '', '', 'Restartpoint write buffers (' . $avg_minutes . ' minutes period)', - 'Buffers', 'Write buffers', '', '' - ); - } - - # Temporary file size - if (exists $graph_data{size}) { - &flotr2_graph( - $graphid++, 'temporarydata_graph', $graph_data{size}, '', '', 'Size of temporary files (' . $avg_minutes . 
' minutes period)', - 'Size of files', 'Size of files' - ); - } - - # Temporary file number - if ($tempfile_info{count}) { - &flotr2_graph( - $graphid++, 'temporaryfile_graph', $graph_data{count}, '', '', 'Number of temporary files (' . $avg_minutes . ' minutes period)', - 'Number of files', 'Number of files' - ); - } - - - # VACUUMs and ANALYZEs - if ($autovacuum_info{count} || $autoanalyze_info{count}) { - if (exists $graph_data{vcount} || exists $graph_data{acount}) { - &flotr2_graph( - $graphid++, 'autovacuum_graph', $graph_data{vcount}, $graph_data{acount}, '', 'Autovacuum actions (' . $avg_minutes . ' minutes period)', - '', 'VACUUMs', 'ANALYZEs' - ); - } - } } + # Checkpoints buffers and files + $drawn_graphs{checkpointwritebuffers_graph} = + &flotr2_graph($graphid++, 'checkpointwritebuffers_graph', $graph_data{wbuffer}, '', '', + 'Checkpoint write buffers (' . $avg_minutes . ' minutes period)', + 'Buffers', 'Write buffers', '', '' + ); + $drawn_graphs{checkpointfiles_graph} = + &flotr2_graph($graphid++, 'checkpointfiles_graph', $graph_data{file_added}, + $graph_data{file_removed}, $graph_data{file_recycled}, 'Checkpoint Wal files usage', + 'Number of files', 'Added', 'Removed', 'Recycled' + ); -} + my $checkpoint_wbuffer_peak = 0; + my $checkpoint_wbuffer_peak_date = ''; + foreach (sort {$overall_checkpoint{'peak'}{$b}{checkpoint_wbuffer} <=> $overall_checkpoint{'peak'}{$a}{checkpoint_wbuffer}} keys %{$overall_checkpoint{'peak'}}) { + $checkpoint_wbuffer_peak = &comma_numbers($overall_checkpoint{'peak'}{$_}{checkpoint_wbuffer}); + $checkpoint_wbuffer_peak_date = $_; + last; + } + my $walfile_usage_peak = 0; + my $walfile_usage_peak_date = ''; + foreach (sort {$overall_checkpoint{'peak'}{$b}{walfile_usage} <=> $overall_checkpoint{'peak'}{$a}{walfile_usage}} keys %{$overall_checkpoint{'peak'}}) { + $walfile_usage_peak = &comma_numbers($overall_checkpoint{'peak'}{$_}{walfile_usage}); + $walfile_usage_peak_date = $_; + last; + } -sub print_vacuum_reports -{ - # VACUUM stats per table - if ($autovacuum_info{count} > 0) { - print $fh qq{ -

    VACUUMs by table ^

    - -"; + $files .= ""; + $warnings .= ""; + $zday = ''; + my %cinf = (); + my %rinf = (); + my %cainf = (); + my %rainf = (); + foreach my $m (keys %{$per_minute_info{$d}{$h}}) { + + if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}) { + $cinf{wbuffer} += $per_minute_info{$d}{$h}{$m}{checkpoint}{wbuffer}; + $cinf{file_added} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_added}; + $cinf{file_removed} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_removed}; + $cinf{file_recycled} += $per_minute_info{$d}{$h}{$m}{checkpoint}{file_recycled}; + $cinf{write} += $per_minute_info{$d}{$h}{$m}{checkpoint}{write}; + $cinf{sync} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync}; + $cinf{total} += $per_minute_info{$d}{$h}{$m}{checkpoint}{total}; + $cainf{sync_files} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync_files}; + $cainf{sync_avg} += $per_minute_info{$d}{$h}{$m}{checkpoint}{sync_avg}; + $cainf{sync_longest} = $per_minute_info{$d}{$h}{$m}{checkpoint}{sync_longest} + if ($per_minute_info{$d}{$h}{$m}{checkpoint}{sync_longest} > $cainf{sync_longest}); } - } - &flotr2_piegraph($graphid++, 'autovacuumbytable_graph', 'Autovacuum per table', %data); - %data = (); - if ($total_tuples) { - print $fh "
    \n"; - foreach my $t (sort keys %{$autovacuum_info{tables}}) { - if ((($autovacuum_info{tables}{$t}{tuples}{removed} * 100) / $total_tuples) > $pie_percentage_limit) { - $data{$t} = $autovacuum_info{tables}{$t}{tuples}{removed} || 0; - } else { - $data{"Others"} += $autovacuum_info{tables}{$t}{tuples}{removed} || 0; - } + if (exists $per_minute_info{$d}{$h}{$m}{checkpoint}{warning}) { + $cinf{warning} += $per_minute_info{$d}{$h}{$m}{checkpoint}{warning}; + $cinf{warning_seconds} += $per_minute_info{$d}{$h}{$m}{checkpoint}{warning_seconds}; } - &flotr2_piegraph($graphid++, 'autovacuumtuplesremoved_graph', 'Autovacuum tuples removed per table', %data); + } + if (scalar keys %cinf) { + $buffers .= ""; + $files .= ""; + } else { + $buffers .= ""; + $files .= ""; + } + if (exists $cinf{warning}) { + $warnings .= ""; + } else { + $warnings .= ""; } } - print $fh "
    - - - - - - - - + print $fh qq{ +

    Checkpoints / Restartpoints

    + +
    +

    Checkpoints Buffers

    +
    +

    Key values

    +
    +
      +
    • $checkpoint_wbuffer_peak buffers Checkpoint Peak
    • +
    • $checkpoint_wbuffer_peak_date Date
    • +
    • $overall_checkpoint{checkpoint_write} seconds Highest write time
    • +
    • $overall_checkpoint{checkpoint_sync} seconds Sync time
    • +
    +
    +
    +
    +

    Checkpoint write buffers ($avg_minutes minutes average)

    +$drawn_graphs{checkpointwritebuffers_graph} +
    +
    }; - my $total_count = 0; - my $total_idxscan = 0; - my $total_tuples = 0; - my $total_pages = 0; - foreach my $t (sort keys %{$autovacuum_info{tables}}) { - print $fh "\n"; - $total_count += $autovacuum_info{tables}{$t}{vacuums}; - $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; - $total_tuples += $autovacuum_info{tables}{$t}{tuples}{removed}; - $total_pages += $autovacuum_info{tables}{$t}{pages}{removed}; - } - print $fh "\n"; - print $fh "
    TableVACUUMsIndex scansTuples removedPages removed
    ", $t, - "", $autovacuum_info{tables}{$t}{vacuums}, - "", $autovacuum_info{tables}{$t}{idxscans}, - "", $autovacuum_info{tables}{$t}{tuples}{removed}, - "", $autovacuum_info{tables}{$t}{pages}{removed}, - "
    Total", $total_count, - "", $total_idxscan, - "", $total_tuples, - "", $total_pages, "
    \n"; - if ($graph && $total_count) { - my %data = (); - foreach my $t (sort keys %{$autovacuum_info{tables}}) { - if ((($autovacuum_info{tables}{$t}{vacuums} * 100) / $total_count) > $pie_percentage_limit) { - $data{$t} = $autovacuum_info{tables}{$t}{vacuums} || 0; - } else { - $data{"Others"} += $autovacuum_info{tables}{$t}{vacuums} || 0; + delete $drawn_graphs{checkpointwritebuffers_graph}; + + print $fh qq{ +
    +

    Checkpoints Wal files

    +
    +

    Key values

    +
    +
      +
    • $walfile_usage_peak files Wal files usage Peak
    • +
    • $walfile_usage_peak_date Date
    • +
    +
    +
    +
    +

    Checkpoint Wal files usage

    +$drawn_graphs{checkpointfiles_graph} +
    +
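
The chart-building loops above walk %per_minute_info keyed by a "YYYYMMDD" day string, then hour and minute, and emit x values in epoch milliseconds, which is what the embedded flotr2 library expects. A minimal sketch of that conversion, with a sample key assumed:

    use Time::Local qw(timegm_nocheck);

    my ($day_key, $hour, $min) = ('20131031', '08', '15');
    my ($y, $mo, $d) = $day_key =~ /^(\d{4})(\d{2})(\d{2})$/;
    # timegm_nocheck() takes (sec, min, hour, mday, mon, year) in GMT
    my $t = timegm_nocheck(0, $min, $hour, $d, $mo - 1, $y - 1900) * 1000;
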
    +}; + delete $drawn_graphs{checkpointfiles_graph}; + + my $buffers = ''; + my $files = ''; + my $warnings = ''; + foreach my $d (sort {$a <=> $b} keys %per_minute_info) { + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { + $buffers .= "
    $zday$h
    $zday$h
    $zday$h" . &comma_numbers($cinf{wbuffer}) . + "" . &comma_numbers($cinf{write}) . + "" . &comma_numbers($cinf{sync}) . + "" . &comma_numbers($cinf{total}) . + "
    " . &comma_numbers($cinf{file_added}) . + "" . &comma_numbers($cinf{file_removed}) . + "" . &comma_numbers($cinf{file_recycled}) . + "" . &comma_numbers($cainf{sync_files}) . + "" . &comma_numbers($cainf{sync_longest}) . + "" . &comma_numbers($cainf{sync_avg}) . + "
    0000
    000000
    " . &comma_numbers($cinf{warning}) . "" . + &comma_numbers(sprintf( "%.2f", ($cinf{warning_seconds} || 0) / ($cinf{warning} || 1))) . + "
    00
    \n"; - } - - # ANALYZE stats per table - if ($autoanalyze_info{count} > 0) { - print $fh qq{ -

    ANALYZEs by table ^

    - -
    - - - - - -}; - my $total_count = 0; - my $total_idxscan = 0; - foreach my $t (sort keys %{$autoanalyze_info{tables}}) { - print $fh "\n"; - $total_count += $autoanalyze_info{tables}{$t}{analyzes}; - } - print $fh "\n"; - print $fh "
    TableANALYZEs
    ", $t, - "", $autoanalyze_info{tables}{$t}{analyzes}, - "
    Total", $total_count, - "
    \n"; } -} -sub print_query_type_report -{ - my $totala = 0; - foreach my $a (@SQL_ACTION) { - $totala += $overall_stat{$a}; - } - return if (!$totala); + $buffers = qq{$NODATA} if (!$buffers); + $files = qq{$NODATA} if (!$files); + $warnings = qq{$NODATA} if (!$warnings); print $fh qq{ -

    Queries by type ^

    - -\n"; - ($r > 0) ? $r = 0 : $r = 1; - } - print $fh "\n" - if (($total - $totala) > 0); - print $fh "
    - - - - - - - +
    +

    Checkpoints Activity

    +
    TypeCountPercentage
    + + + + + + + + + + + $buffers + +
Day | Hour | Written buffers | Write time | Sync time | Total time
    + +
    + + + + + + + + + + + + + + $files + +
Day | Hour | Added | Removed | Recycled | Synced files | Longest sync | Average sync
    +
    +
    + + + + + + + + + + $warnings + +
Day | Hour | Count | Avg time (sec)
    +
    + + Back to the top of the Checkpoint Activity table + + + }; - my $total = $overall_stat{'queries_number'} || 1; - my $r = 0; - foreach my $a (@SQL_ACTION) { - print $fh "
    $a", &comma_numbers($overall_stat{$a}), - "", sprintf("%0.2f", ($overall_stat{$a} * 100) / $total), "%
    OTHERS", &comma_numbers($total - $totala), - "", sprintf("%0.2f", (($total - $totala) * 100) / $total), "%
    \n"; +} + +sub print_temporary_file +{ + # checkpoint + my %graph_data = (); if ($graph) { - my %data = (); - foreach my $t (@SQL_ACTION) { - if ((($overall_stat{$t} * 100) / $total) > $pie_percentage_limit) { - $data{$t} = $overall_stat{$t} || 0; - } else { - $data{"Sum types < $pie_percentage_limit%"} += $overall_stat{$t} || 0; + foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { + $tm =~ /(\d{4})(\d{2})(\d{2})/; + my $y = $1 - 1900; + my $mo = $2 - 1; + my $d = $3; + foreach my $h ("00" .. "23") { + next if (!exists $per_minute_info{$tm}{$h}); + my %chk_dataavg = (); + my %t_dataavg = (); + my %v_dataavg = (); + foreach my $m ("00" .. "59") { + next if (!exists $per_minute_info{$tm}{$h}{$m}); + my $rd = &average_per_minutes($m, $avg_minutes); + if ($tempfile_info{count}) { + if (exists $per_minute_info{$tm}{$h}{$m}{tempfile}) { + $t_dataavg{size}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{tempfile}{size} || 0); + $t_dataavg{count}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{tempfile}{count} || 0); + } + } + } + + foreach my $rd (@avgs) { + my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; + + next if ($t < $t_min); + last if ($t > $t_max); + + if (exists $t_dataavg{size}) { + $graph_data{size} .= "[$t, " . ($t_dataavg{size}{"$rd"} || 0) . "],"; + $graph_data{count} .= "[$t, " . ($t_dataavg{count}{"$rd"} || 0) . "],"; + } + } } } - if (((($total - $totala) * 100) / $total) > $pie_percentage_limit) { - $data{'Others'} = $total - $totala; - } else { - $data{"Sum types < $pie_percentage_limit%"} += $total - $totala; + foreach (keys %graph_data) { + $graph_data{$_} =~ s/,$//; } - &flotr2_piegraph($graphid++, 'queriesbytype_graph', 'Type of queries', %data); } - print $fh "\n"; -} + # Temporary file size + $drawn_graphs{temporarydata_graph} = + &flotr2_graph($graphid++, 'temporarydata_graph', $graph_data{size}, '', '', + 'Size of temporary files (' . $avg_minutes . ' minutes period)', + 'Size of files', 'Size of files' + ); + # Temporary file number + $drawn_graphs{temporaryfile_graph} = + &flotr2_graph($graphid++, 'temporaryfile_graph', $graph_data{count}, '', '', + 'Number of temporary files (' . $avg_minutes . ' minutes period)', + 'Number of files', 'Number of files' + ); -sub print_request_report -{ - # Show request per database statistics - if (scalar keys %database_info > 1) { - print $fh qq{ -

    Queries per database ^

    - -"; + $zday = ""; + my %tinf = (); + foreach my $m (keys %{$per_minute_info{$d}{$h}}) { + if (exists $per_minute_info{$d}{$h}{$m}{tempfile}) { + $tinf{size} += $per_minute_info{$d}{$h}{$m}{tempfile}{size}; + $tinf{count} += $per_minute_info{$d}{$h}{$m}{tempfile}{count}; } } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum databases < $pie_percentage_limit%"}; - delete $infos{"Sum databases < $pie_percentage_limit%"}; + if (scalar keys %tinf) { + my $temp_average = &pretty_print_size(sprintf("%.2f", $tinf{size} / $tinf{count})); + $tempfiles_activity .= ""; + } else { + $tempfiles_activity .= ""; } - &flotr2_piegraph($graphid++, 'requestsdatabases_graph', 'Queries per database', %infos); } - print $fh "
    - - - - - - + my $tempfile_size_peak = 0; + my $tempfile_size_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{tempfile_size} <=> $overall_stat{'peak'}{$a}{tempfile_size}} keys %{$overall_stat{'peak'}}) { + $tempfile_size_peak = &pretty_print_size($overall_stat{'peak'}{$_}{tempfile_size}); + $tempfile_size_peak_date = $_; + last; + } + print $fh qq{ +

    Temporary Files

    + +
    +

    Size of temporary files

    +
    +

    Key values

    +
    +
      +
• $tempfile_size_peak Temp files size Peak
    • +
    • $tempfile_size_peak_date Date
    • +
    +
    +
    +
    +

    Size of temporary files ($avg_minutes minutes average)

    +$drawn_graphs{temporarydata_graph} +
    +
    }; - my $total_count = 0; - foreach my $d (sort keys %database_info) { - print $fh "\n"; - $total_count += $database_info{$d}{count}; - foreach my $r (sort keys %{$database_info{$d}}) { - next if ($r eq 'count'); - print $fh "\n"; - } - } - print $fh "
    DatabaseRequest typeCount
    $d", - &comma_numbers($database_info{$d}{count}), "
    $r", - &comma_numbers($database_info{$d}{$r}), "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %database_info) { - if ((($database_info{$d}{count} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $database_info{$d}{count} || 0; - } else { - $infos{"Sum databases < $pie_percentage_limit%"} += $database_info{$d}{count} || 0; - push(@small, $d); + delete $drawn_graphs{temporarydata_graph}; + + my $tempfile_count_peak = 0; + my $tempfile_count_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{tempfile_count} <=> $overall_stat{'peak'}{$a}{tempfile_count}} keys %{$overall_stat{'peak'}}) { + $tempfile_count_peak = &comma_numbers($overall_stat{'peak'}{$_}{tempfile_count}); + $tempfile_count_peak_date = $_; + last; + } + print $fh qq{ +
    +

    Number of temporary files

    +
    +

    Key values

    +
    +
      +
• $tempfile_count_peak per second Temp files number Peak
    • +
    • $tempfile_count_peak_date Date
    • +
    +
    +
    +
    +

    Number of temporary files ($avg_minutes minutes average)

    +$drawn_graphs{temporaryfile_graph} +
    +
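
The "Average size" column below divides the accumulated temporary file size by the file count, then renders it with &pretty_print_size. A hedged sketch of such a formatter, assuming base-1024 units (the real pgbadger helper may choose different units or rounding):

    sub pretty_print_size_sketch
    {
        my $val = shift || 0;
        foreach my $unit ('B', 'KB', 'MB', 'GB', 'TB') {
            return sprintf("%.2f %s", $val, $unit) if ($val < 1024);
            $val /= 1024;    # move to the next larger unit
        }
        return sprintf("%.2f PB", $val);
    }
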
    +}; + delete $drawn_graphs{temporaryfile_graph}; + + my $tempfiles_activity = ''; + foreach my $d (sort {$a <=> $b} keys %per_minute_info) { + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { + $tempfiles_activity .= "
    $zday$h" . &comma_numbers($tinf{count}) . + "$temp_average00
    \n"; } - # Show request per application statistics - if (scalar keys %application_info > 1) { - print $fh qq{ -

    Queries per application ^

    - -\n"; - $total_count += $application_info{$d}{count}; - foreach my $r (sort keys %{$application_info{$d}}) { - next if ($r eq 'count'); - print $fh "\n"; - } + +} + +sub print_analyze_per_table +{ + # ANALYZE stats per table + my %infos = (); + my $total_count = 0; + my $analyze_info = ''; + my @main_analyze = ('unknown',0); + foreach my $t (sort {$autoanalyze_info{tables}{$b}{analyzes} <=> $autoanalyze_info{tables}{$a}{analyzes}} keys %{$autoanalyze_info{tables}}) { + $analyze_info .= ""; + $total_count += $autoanalyze_info{tables}{$t}{analyzes}; + if ($main_analyze[1] < $autoanalyze_info{tables}{$t}{analyzes}) { + $main_analyze[0] = $t; + $main_analyze[1] = $autoanalyze_info{tables}{$t}{analyzes}; } - print $fh "
    - - - - - - + $tempfiles_activity = qq{} if (!$tempfiles_activity); + + print $fh qq{ +
    +

    Temporary Files Activity

    +
    + +
    +
    +
    DatabaseRequest typeCount
    $NODATA
    + + + + + + + + + $tempfiles_activity + +
Day | Hour | Count | Average size
    + + + Back to the top of the Temporay Files Activity table + + + }; - my $total_count = 0; - foreach my $d (sort keys %application_info) { - print $fh "
    $d", - &comma_numbers($application_info{$d}{count}), "
    $r", - &comma_numbers($application_info{$d}{$r}), "
    $t" . $autoanalyze_info{tables}{$t}{analyzes} . + "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %application_info) { - if ((($application_info{$d}{count} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $application_info{$d}{count} || 0; - } else { - $infos{"Sum applications < $pie_percentage_limit%"} += $application_info{$d}{count} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum applications < $pie_percentage_limit%"}; - delete $infos{"Sum applications < $pie_percentage_limit%"}; + } + $analyze_info .= "Total" . &comma_numbers($total_count) . ""; + + if ($graph) { + my @small = (); + foreach my $d (sort keys %{$autoanalyze_info{tables}}) { + if ((($autoanalyze_info{tables}{$d}{analyzes} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $autoanalyze_info{tables}{$d}{analyzes} || 0; + } else { + $infos{"Sum analyzes < $pie_percentage_limit%"} += $autoanalyze_info{tables}{$d}{analyzes} || 0; + push(@small, $d); } - &flotr2_piegraph($graphid++, 'requestsapplications_graph', 'Queries per application', %infos); } - print $fh "\n"; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum analyzes < $pie_percentage_limit%"}; + delete $infos{"Sum analyzes < $pie_percentage_limit%"}; + } + } + $drawn_graphs{tableanalyzes_graph} = &flotr2_piegraph($graphid++, 'tableanalyzes_graph', 'Analyzes per tables', %infos); + $total_count = &comma_numbers($total_count); + my $database = ''; + if ($main_analyze[0] =~ s/^([^\.]+)\.//) { + $database = $1; } + + $analyze_info = qq{$NODATA} if (!$total_count); + + print $fh qq{ +
    +

    Analyses per table

    +
    +

    Key values

    +
    +
      +
• $main_analyze[0] ($main_analyze[1]) Main table analyzed on database $database
    • +
    • $total_count analyzes Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{tableanalyzes_graph} +
    +
    + + + + + + + + + $analyze_info + +
    TableNumber of analyzes
    +
    +
    +
    +
    +
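
The "database $database" note in the key values above comes from splitting the qualified "db.table" key in place: the substitution strips the database prefix off the table name while capturing it. Sketch with an assumed sample value:

    my $table = 'mydb.big_table';
    my $database = '';
    $database = $1 if ($table =~ s/^([^\.]+)\.//);
    # now $database eq 'mydb' and $table eq 'big_table'
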
    +}; + delete $drawn_graphs{tableanalyzes_graph}; + } -sub print_locktype_report +sub print_vacuum { - if (scalar keys %lock_info > 0) { - print $fh qq{ -

    Locks by type ^

    - -"; + $zday = ""; + my %ainf = (); + foreach my $m (keys %{$per_minute_info{$d}{$h}}) { + + if (exists $per_minute_info{$d}{$h}{$m}{autovacuum}{count}) { + $ainf{vcount} += $per_minute_info{$d}{$h}{$m}{autovacuum}{count}; + } + if (exists $per_minute_info{$d}{$h}{$m}{autoanalyze}{count}) { + $ainf{acount} += $per_minute_info{$d}{$h}{$m}{autoanalyze}{count}; } + } - if ($#small == 0) { - $locktype{$small[0]} = $locktype{"Sum types < $pie_percentage_limit%"}; - delete $locktype{"Sum types < $pie_percentage_limit%"}; + if (scalar keys %ainf) { + $vacuum_activity .= ""; + } else { + $vacuum_activity .= ""; + } + if (scalar keys %ainf) { + $vacuum_activity .= ""; + } else { + $vacuum_activity .= ""; } - &flotr2_piegraph($graphid++, 'lockbytype_graph', 'Type of locks', %locktype); } - print $fh "
    - - - - - - - - -}; - my $total_count = 0; - my $total_duration = 0; - foreach my $t (sort keys %lock_info) { - print $fh "\n"; - foreach my $o (sort keys %{$lock_info{$t}}) { - next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos')); - print $fh "\n"; + + # checkpoint + my %graph_data = (); + foreach my $tm (sort {$a <=> $b} keys %per_minute_info) { + $tm =~ /(\d{4})(\d{2})(\d{2})/; + my $y = $1 - 1900; + my $mo = $2 - 1; + my $d = $3; + foreach my $h ("00" .. "23") { + next if (!exists $per_minute_info{$tm}{$h}); + my %chk_dataavg = (); + my %t_dataavg = (); + my %v_dataavg = (); + foreach my $m ("00" .. "59") { + next if (!exists $per_minute_info{$tm}{$h}{$m}); + + my $rd = &average_per_minutes($m, $avg_minutes); + + if (exists $per_minute_info{$tm}{$h}{$m}{autovacuum}) { + $v_dataavg{vcount}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{autovacuum}{count} || 0); + } + if (exists $per_minute_info{$tm}{$h}{$m}{autoanalyze}) { + $v_dataavg{acount}{"$rd"} += ($per_minute_info{$tm}{$h}{$m}{autoanalyze}{count} || 0); + } + } + + foreach my $rd (@avgs) { + my $t = timegm_nocheck(0, $rd, $h, $d, $mo, $y) * 1000; + + next if ($t < $t_min); + last if ($t > $t_max); + + if (exists $v_dataavg{vcount}) { + $graph_data{vcount} .= "[$t, " . ($v_dataavg{vcount}{"$rd"} || 0) . "],"; + } + if (exists $v_dataavg{acount}) { + $graph_data{acount} .= "[$t, " . ($v_dataavg{acount}{"$rd"} || 0) . "],"; + } + } - $total_count += $lock_info{$t}{count}; - $total_duration += $lock_info{$t}{duration}; } - print $fh "\n"; - print $fh "
    TypeObjectCountTotal DurationAvg duration (s)
    $t", &comma_numbers($lock_info{$t}{count}), - "", &convert_time($lock_info{$t}{duration}), "", - &convert_time($lock_info{$t}{duration} / $lock_info{$t}{count}), "
    $o", - &comma_numbers($lock_info{$t}{$o}{count}), "", - &convert_time($lock_info{$t}{$o}{duration}), "", - &convert_time($lock_info{$t}{$o}{duration} / $lock_info{$t}{$o}{count}), "
    Total", &comma_numbers($total_count), - "", &convert_time($total_duration), "", - &convert_time($total_duration / ($total_count || 1)), "
    \n"; - if ($graph && $total_count) { - my %locktype = (); - my @small = (); - foreach my $d (sort keys %lock_info) { - if ((($lock_info{$d}{count} * 100) / $total_count) > $pie_percentage_limit) { - $locktype{$d} = $lock_info{$d}{count} || 0; - } else { - $locktype{"Sum types < $pie_percentage_limit%"} += $lock_info{$d}{count} || 0; - push(@small, $d); + } + foreach (keys %graph_data) { + $graph_data{$_} =~ s/,$//; + } + + # VACUUMs vs ANALYZEs chart + $drawn_graphs{autovacuum_graph} = + &flotr2_graph($graphid++, 'autovacuum_graph', $graph_data{vcount}, $graph_data{acount}, + '', 'Autovacuum actions (' . $avg_minutes . ' minutes period)', '', 'VACUUMs', 'ANALYZEs' + ); + + my $vacuum_size_peak = 0; + my $vacuum_size_peak_date = ''; + foreach (sort {$overall_stat{'peak'}{$b}{vacuum_size} <=> $overall_stat{'peak'}{$a}{vacuum_size}} keys %{$overall_stat{'peak'}}) { + $vacuum_size_peak = &comma_numbers($overall_stat{'peak'}{$_}{vacuum_size}); + $vacuum_size_peak_date = $_; + last; + } + my $autovacuum_peak_system_usage_db = ''; + if ($autovacuum_info{peak}{system_usage}{table} =~ s/^([^\.]+)\.//) { + $autovacuum_peak_system_usage_db = $1; + } + my $autoanalyze_peak_system_usage_db = ''; + if ($autoanalyze_info{peak}{system_usage}{table} =~ s/^([^\.]+)\.//) { + $autoanalyze_peak_system_usage_db = $1; + } + $autovacuum_info{peak}{system_usage}{elapsed} ||= 0; + $autoanalyze_info{peak}{system_usage}{elapsed} ||= 0; + print $fh qq{ +

    Vacuums

    + +
    +

    Vacuums / Analyzes Distribution

    +
    +

    Key values

    +
    +
      +
• $autovacuum_info{peak}{system_usage}{elapsed} sec Most CPU costly vacuum
      Table $autovacuum_info{peak}{system_usage}{table}
      Database $autovacuum_peak_system_usage_db
    • +
    • $autovacuum_info{peak}{system_usage}{date} Date
    • +
• $autoanalyze_info{peak}{system_usage}{elapsed} sec Most CPU costly analyze
      Table $autoanalyze_info{peak}{system_usage}{table}
Database $autoanalyze_peak_system_usage_db
    • +
    • $autoanalyze_info{peak}{system_usage}{date} Date
    • +
    +
    +
    +
    +

    Autovacuum actions ($avg_minutes minutes average)

    +$drawn_graphs{autovacuum_graph} +
    +
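
Every series handed to &flotr2_graph above is a flot-style string of "[x, y]," pairs, with x in epoch milliseconds; the trailing comma is stripped with s/,$// before the graph is rendered. Illustrative construction with made-up points:

    my $serie = '';
    foreach my $point ([1383206400000, 3], [1383206700000, 5]) {
        $serie .= "[$point->[0], $point->[1]],";
    }
    $serie =~ s/,$//;    # "[1383206400000, 3],[1383206700000, 5]"
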
    +}; + delete $drawn_graphs{autovacuum_graph}; + + # ANALYZE stats per table + &print_analyze_per_table(); + # VACUUM stats per table + &print_vacuum_per_table(); + + # Show tuples and pages removed per table + &print_vacuum_tuple_removed; + &print_vacuum_page_removed; + + my $vacuum_activity = ''; + foreach my $d (sort {$a <=> $b} keys %per_minute_info) { + my $c = 1; + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort {$a <=> $b} keys %{$per_minute_info{$d}}) { + $vacuum_activity .= "
    $zday$h" . &comma_numbers($ainf{vcount}) . "0" . &comma_numbers($ainf{acount}) . "0
    \n"; } + + $vacuum_activity = qq{$NODATA} if (!$vacuum_activity); + + print $fh qq{ +
    +

    Autovacuum Activity

    +
    + +
    +
    + + + + + + + + + + $vacuum_activity + +
Day | Hour | VACUUMs | ANALYZEs
    +
    +
+ Back to the top of the Autovacuum Activity table +
    + +
    +}; + } -sub print_session_report +sub print_vacuum_per_table { - if (exists $session_info{database}) { - print $fh qq{ -

    Sessions per database ^

    - -\n"; - $total_count += $session_info{database}{$d}{count}; - $c++; + delete $drawn_graphs{tablevacuums_graph}; + +} + +sub print_vacuum_tuple_removed +{ + # VACUUM stats per table + my $total_count = 0; + my $total_idxscan = 0; + my $total_tuple = 0; + my $total_page = 0; + my $vacuum_info = ''; + my @main_tuple = ('unknown',0); + foreach my $t (sort {$autovacuum_info{tables}{$b}{tuples}{removed} <=> $autovacuum_info{tables}{$a}{tuples}{removed}} keys %{$autovacuum_info{tables}}) { + $vacuum_info .= ""; + $total_count += $autovacuum_info{tables}{$t}{vacuums}; + $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; + $total_tuple += $autovacuum_info{tables}{$t}{tuples}{removed}; + $total_page += $autovacuum_info{tables}{$t}{pages}{removed}; + if ($main_tuple[1] < $autovacuum_info{tables}{$t}{tuples}{removed}) { + $main_tuple[0] = $t; + $main_tuple[1] = $autovacuum_info{tables}{$t}{tuples}{removed}; + } + } + $vacuum_info .= ""; + + my %infos_tuple = (); + my @small = (); + foreach my $d (sort keys %{$autovacuum_info{tables}}) { + if ((($autovacuum_info{tables}{$d}{tuples}{removed} * 100) / ($total_tuple||1)) > $pie_percentage_limit) { + $infos_tuple{$d} = $autovacuum_info{tables}{$d}{tuples}{removed} || 0; + } else { + $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"} += $autovacuum_info{tables}{$d}{tuples}{removed} || 0; + push(@small, $d); } - print $fh "
    - - - - - - - + # VACUUM stats per table + my $total_count = 0; + my $total_idxscan = 0; + my $vacuum_info = ''; + my @main_vacuum = ('unknown',0); + foreach my $t (sort {$autovacuum_info{tables}{$b}{vacuums} <=> $autovacuum_info{tables}{$a}{vacuums}} keys %{$autovacuum_info{tables}}) { + $vacuum_info .= ""; + $total_count += $autovacuum_info{tables}{$t}{vacuums}; + $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; + if ($main_vacuum[1] < $autovacuum_info{tables}{$t}{vacuums}) { + $main_vacuum[0] = $t; + $main_vacuum[1] = $autovacuum_info{tables}{$t}{vacuums}; + } + } + $vacuum_info .= ""; + + my %infos = (); + my @small = (); + foreach my $d (sort keys %{$autovacuum_info{tables}}) { + if ((($autovacuum_info{tables}{$d}{vacuums} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $infos{$d} = $autovacuum_info{tables}{$d}{vacuums} || 0; + } else { + $infos{"Sum vacuums < $pie_percentage_limit%"} += $autovacuum_info{tables}{$d}{vacuums} || 0; + push(@small, $d); + } + } + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum vacuums < $pie_percentage_limit%"}; + delete $infos{"Sum vacuums < $pie_percentage_limit%"}; + } + $drawn_graphs{tablevacuums_graph} = &flotr2_piegraph($graphid++, 'tablevacuums_graph', 'Analyzes per tables', %infos); + $vacuum_info = qq{} if (!$total_count); + $total_count = &comma_numbers($total_count); + my $database = ''; + if ($main_vacuum[0] =~ s/^([^\.]+)\.//) { + $database = $1; + } + print $fh qq{ +
    +

    Vacuums per table

    +
    +

    Key values

    +
    +
      +
    • $main_vacuum[0] ($main_vacuum[1]) Main table vacuumed on database $database
    • +
    • $total_count vacuums Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{tablevacuums_graph} +
    +
    +
    DatabaseCountTotal DurationAvg duration (s)
    $t" . $autovacuum_info{tables}{$t}{vacuums} . + "" . $autovacuum_info{tables}{$t}{idxscans} . + "
    Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . "
    $NODATA
    + + + + + + + + + $vacuum_info + +
Table | Number of vacuums | Index scans
    + + + + + }; - my $total_count = 0; - my $c = 0; - foreach my $d (sort keys %{$session_info{database}}) { - my $colb = $c % 2; - print $fh "
    $d", &comma_numbers($session_info{database}{$d}{count}), - "", &convert_time($session_info{database}{$d}{duration}), "", - &convert_time($session_info{database}{$d}{duration} / $session_info{database}{$d}{count}), "
    $t" . $autovacuum_info{tables}{$t}{vacuums} . + "" . $autovacuum_info{tables}{$t}{idxscans} . + "" . $autovacuum_info{tables}{$t}{tuples}{removed} . + "" . $autovacuum_info{tables}{$t}{pages}{removed} . + "
    Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . + "" . &comma_numbers($total_tuple) . "" . &comma_numbers($total_page) . "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %{$session_info{database}}) { - if ((($session_info{database}{$d}{count} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $session_info{database}{$d}{count} || 0; - } else { - $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{database}{$d}{count} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; - delete $infos{"Sum sessions < $pie_percentage_limit%"}; - } - &flotr2_piegraph($graphid++, 'databasesessions_graph', 'Sessions per database', %infos); + } + if ($#small == 0) { + $infos_tuple{$small[0]} = $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"}; + delete $infos_tuple{"Sum tuples removed < $pie_percentage_limit%"}; + } + $drawn_graphs{tuplevacuums_graph} = &flotr2_piegraph($graphid++, 'tuplevacuums_graph', 'Tuples removed per tables', %infos_tuple); + $vacuum_info = qq{$NODATA} if (!$total_count); + $total_count = &comma_numbers($total_count); + my $database = ''; + if ($main_tuple[0] =~ s/^([^\.]+)\.//) { + $database = $1; + } + print $fh qq{ +
    +

    Tuples removed per table

    +
    +

    Key values

    +
    +
      +
    • $main_tuple[0] ($main_tuple[1]) Main table with removed tuples on database $database
    • +
    • $total_tuple tuples Total removed
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{tuplevacuums_graph} +
    +
    + + + + + + + + + + + + $vacuum_info + +
Table | Number of vacuums | Index scans | Tuples removed | Pages removed
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{tuplevacuums_graph}; + +} + +sub print_vacuum_page_removed +{ + # VACUUM stats per table + my $total_count = 0; + my $total_idxscan = 0; + my $total_tuple = 0; + my $total_page = 0; + my $vacuum_info = ''; + my @main_tuple = ('unknown',0); + my @main_page = ('unknown',0); + foreach my $t (sort {$autovacuum_info{tables}{$b}{pages}{removed} <=> $autovacuum_info{tables}{$a}{pages}{removed}} keys %{$autovacuum_info{tables}}) { + $vacuum_info .= "$t" . $autovacuum_info{tables}{$t}{vacuums} . + "" . $autovacuum_info{tables}{$t}{idxscans} . + "" . $autovacuum_info{tables}{$t}{tuples}{removed} . + "" . $autovacuum_info{tables}{$t}{pages}{removed} . + ""; + $total_count += $autovacuum_info{tables}{$t}{vacuums}; + $total_idxscan += $autovacuum_info{tables}{$t}{idxscans}; + $total_tuple += $autovacuum_info{tables}{$t}{tuples}{removed}; + $total_page += $autovacuum_info{tables}{$t}{pages}{removed}; + if ($main_page[1] < $autovacuum_info{tables}{$t}{pages}{removed}) { + $main_page[0] = $t; + $main_page[1] = $autovacuum_info{tables}{$t}{pages}{removed}; + } + } + $vacuum_info .= "Total" . &comma_numbers($total_count) . "" . &comma_numbers($total_idxscan) . + "" . &comma_numbers($total_tuple) . "" . &comma_numbers($total_page) . ""; + + my %infos_page = (); + my @small = (); + foreach my $d (sort keys %{$autovacuum_info{tables}}) { + if ((($autovacuum_info{tables}{$d}{pages}{removed} * 100) / ($total_page || 1)) > $pie_percentage_limit) { + $infos_page{$d} = $autovacuum_info{tables}{$d}{pages}{removed} || 0; + } else { + $infos_page{"Sum pages removed < $pie_percentage_limit%"} += $autovacuum_info{tables}{$d}{pages}{removed} || 0; + push(@small, $d); } - print $fh "\n"; } + if ($#small == 0) { + $infos_page{$small[0]} = $infos_page{"Sum pages removed < $pie_percentage_limit%"}; + delete $infos_page{"Sum pages removed < $pie_percentage_limit%"}; + } + $drawn_graphs{pagevacuums_graph} = &flotr2_piegraph($graphid++, 'pagevacuums_graph', 'Tuples removed per tables', %infos_page); + $vacuum_info = qq{$NODATA} if (!$total_count); + $total_count = &comma_numbers($total_count); + my $database = ''; + if ($main_page[0] =~ s/^([^\.]+)\.//) { + $database = $1; + } + print $fh qq{ +
    +

    Pages removed per table

    +
    +

    Key values

    +
    +
      +
    • $main_page[0] ($main_page[1]) Main table with removed pages on database $database
    • +
    • $total_page pages Total removed
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{pagevacuums_graph} +
    +
    + + + + + + + + + + + + $vacuum_info + +
Table | Number of vacuums | Index scans | Tuples removed | Pages removed
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{pagevacuums_graph}; - # Show session per user statistics - if (exists $session_info{user}) { - print $fh qq{ -

    Sessions per user ^

    - -\n"; - $c++; + delete $drawn_graphs{lockbytype_graph}; +} + +sub print_query_type +{ + + my %data = (); + my $total_queries = 0; + my $total_select = 0; + my $total_write = 0; + foreach my $a (@SQL_ACTION) { + $total_queries += $overall_stat{$a}; + if ($a eq 'SELECT') { + $total_select += $overall_stat{$a}; + } elsif ($a ne 'OTHERS') { + $total_write += $overall_stat{$a}; } - print $fh "
- - - - - - - +} + +sub print_lock_type +{ + my %locktype = (); + my $total_count = 0; + my $total_duration = 0; + my $locktype_info = ''; + my @main_locktype = ('unknown',0); + foreach my $t (sort keys %lock_info) { + $locktype_info .= ""; + $total_count += $lock_info{$t}{count}; + $total_duration += $lock_info{$t}{duration}; + if ($main_locktype[1] < $lock_info{$t}{count}) { + $main_locktype[0] = $t; + $main_locktype[1] = $lock_info{$t}{count}; + } + foreach my $o (sort keys %{$lock_info{$t}}) { + next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos')); + $locktype_info .= "\n"; + } + } + if ($total_count > 0) { + $locktype_info .= ""; + } else { + $locktype_info = qq{}; + } + if ($graph) { + my @small = (); + foreach my $d (sort keys %lock_info) { + if ((($lock_info{$d}{count} * 100) / ($total_count||1)) > $pie_percentage_limit) { + $locktype{$d} = $lock_info{$d}{count} || 0; + } else { + $locktype{"Sum lock types < $pie_percentage_limit%"} += $lock_info{$d}{count} || 0; + push(@small, $d); + + } + } + if ($#small == 0) { + $locktype{$small[0]} = $locktype{"Sum lock types < $pie_percentage_limit%"}; + delete $locktype{"Sum lock types < $pie_percentage_limit%"}; + } + } + $drawn_graphs{lockbytype_graph} = &flotr2_piegraph($graphid++, 'lockbytype_graph', 'Type of locks', %locktype); + $total_count = &comma_numbers($total_count); + print $fh qq{ +
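# A short sketch, with invented numbers, of the two-level %lock_info layout
# walked above: per lock type totals plus one sub-entry per locked object,
# where 'count', 'duration' and 'chronos' are reserved bookkeeping keys.
use strict;
use warnings;

my %lock_info = (
    relation => {
        count    => 3,
        duration => 1.25,    # seconds
        'some_table' => { count => 3, duration => 1.25 },
    },
);
foreach my $t (sort keys %lock_info) {
    # Same division-by-zero guard as the report code above
    my $avg = $lock_info{$t}{duration} / ($lock_info{$t}{count} || 1);
    printf "%s: %d locks, avg %.3fs\n", $t, $lock_info{$t}{count}, $avg;
    foreach my $o (sort keys %{$lock_info{$t}}) {
        next if (($o eq 'count') || ($o eq 'duration') || ($o eq 'chronos'));
        printf "  %s: %d waits\n", $o, $lock_info{$t}{$o}{count};
    }
}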

    Locks

    +
    +

Locks by type

    +
    +

    Key values

    +
    +
      +
• $main_locktype[0] Main lock type
    • +
    • $total_count locks Total
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{lockbytype_graph} +
    +
    +
    UserCountTotal DurationAvg duration (s)
    $t" . &comma_numbers($lock_info{$t}{count}) . + "" . &convert_time($lock_info{$t}{duration}) . "" . + &convert_time($lock_info{$t}{duration} / ($lock_info{$t}{count} || 1)) . "
    $o" . &comma_numbers($lock_info{$t}{$o}{count}) . + "" . &convert_time($lock_info{$t}{$o}{duration}) . "" . + &convert_time($lock_info{$t}{$o}{duration} / $lock_info{$t}{$o}{count}) . + "
    Total" . &comma_numbers($total_count) . + "" . &convert_time($total_duration) . "" . + &convert_time($total_duration / ($total_count || 1)) . "
    $NODATA
    + + + + + + + + + + + $locktype_info + +
    TypeObjectCountTotal DurationAverage Duration (s)
    + + + + + }; - my $total_count = 0; - my $c = 0; - foreach my $d (sort keys %{$session_info{user}}) { - my $colb = $c % 2; - $total_count += $session_info{user}{$d}{count}; - print $fh "
    $d", &comma_numbers($session_info{user}{$d}{count}), - "", &convert_time($session_info{user}{$d}{duration}), "", - &convert_time($session_info{user}{$d}{duration} / $session_info{user}{$d}{count}), "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %{$session_info{user}}) { - if ((($session_info{user}{$d}{count} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $session_info{user}{$d}{count} || 0; - } else { - $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{user}{$d}{count} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; - delete $infos{"Sum sessions < $pie_percentage_limit%"}; + } + my $total = $overall_stat{'queries_number'}; + + my $querytype_info = ''; + foreach my $a (@SQL_ACTION) { + $querytype_info .= "$a" . &comma_numbers($overall_stat{$a}) . + "" . sprintf("%0.2f", ($overall_stat{$a} * 100) / ($total||1)) . "%"; + } + if (($total - $total_queries) > 0) { + $querytype_info .= "OTHERS" . &comma_numbers($total - $total_queries) . + "" . sprintf("%0.2f", (($total - $total_queries) * 100) / ($total||1)) . "%"; + } + $querytype_info = qq{$NODATA} if (!$total); + + if ($graph && $total) { + foreach my $t (@SQL_ACTION) { + if ((($overall_stat{$t} * 100) / ($total||1)) > $pie_percentage_limit) { + $data{$t} = $overall_stat{$t} || 0; + } else { + $data{"Sum query types < $pie_percentage_limit%"} += $overall_stat{$t} || 0; } - &flotr2_piegraph($graphid++, 'usersessions_graph', 'Sessions per user', %infos); } - print $fh "\n"; + if (((($total - $total_queries) * 100) / ($total||1)) > $pie_percentage_limit) { + $data{'Others'} = $total - $total_queries; + } else { + $data{"Sum query types < $pie_percentage_limit%"} += $total - $total_queries; + } } + $drawn_graphs{queriesbytype_graph} = &flotr2_piegraph($graphid++, 'queriesbytype_graph', 'Type of queries', %data); - # Show session per host statistics - if (exists $session_info{host}) { - print $fh qq{ -

    Sessions per host ^

    - -\n"; - $c++; - } - print $fh "
    - - - - - - - + $total_select = &comma_numbers($total_select); + $total_write = &comma_numbers($total_write); + print $fh qq{ +

    Queries

    +
    +

    Queries by type

    +
    +

    Key values

    +
    +
      +
    • $total_select Total read queries
    • +
    • $total_write Total write queries
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{queriesbytype_graph} +
    +
    +
    HostCountTotal DurationAvg duration (s)
    + + + + + + + + + $querytype_info + +
    TypeCountPercentage
    + + + + + }; - my $total_count = 0; - my $c = 0; - foreach my $d (sort keys %{$session_info{host}}) { - my $colb = $c % 2; - $total_count += $session_info{host}{$d}{count}; - print $fh "
    $d", &comma_numbers($session_info{host}{$d}{count}), - "", &convert_time($session_info{host}{$d}{duration}), "", - &convert_time($session_info{host}{$d}{duration} / $session_info{host}{$d}{count}), "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %{$session_info{host}}) { - if ((($session_info{host}{$d}{count} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $session_info{host}{$d}{count} || 0; - } else { - $infos{"Sum sessions < $pie_percentage_limit%"} += $session_info{host}{$d}{count} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum sessions < $pie_percentage_limit%"}; - delete $infos{"Sum sessions < $pie_percentage_limit%"}; + delete $drawn_graphs{queriesbytype_graph}; + +} + +sub print_query_per_database +{ + my %infos = (); + my $total_count = 0; + my $query_database_info = ''; + my @main_database = ('unknown', 0); + foreach my $d (sort keys %database_info) { + $query_database_info .= "$dTotal" . + &comma_numbers($database_info{$d}{count}) . ""; + $total_count += $database_info{$d}{count}; + if ($main_database[1] < $database_info{$d}{count}) { + $main_database[0] = $d; + $main_database[1] = $database_info{$d}{count}; + } + foreach my $r (sort keys %{$database_info{$d}}) { + next if ($r eq 'count'); + $query_database_info .= "$r" . + &comma_numbers($database_info{$d}{$r}) . ""; + } + } + + $query_database_info = qq{$NODATA} if (!$total_count); + + if ($graph) { + my @small = (); + foreach my $d (sort keys %database_info) { + if ((($database_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { + $infos{$d} = $database_info{$d}{count} || 0; + } else { + $infos{"Sum queries per databases < $pie_percentage_limit%"} += $database_info{$d}{count} || 0; + push(@small, $d); } - &flotr2_piegraph($graphid++, 'hostsessions_graph', 'Sessions per host', %infos); } - print $fh "\n"; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum queries per databases < $pie_percentage_limit%"}; + delete $infos{"Sum queries per databases < $pie_percentage_limit%"}; + } } + $drawn_graphs{queriesbydatabase_graph} = &flotr2_piegraph($graphid++, 'queriesbydatabase_graph', 'Queries per database', %infos); + + $main_database[1] = &comma_numbers($main_database[1]); + print $fh qq{ +
    +

    Queries by database

    +
    +

    Key values

    +
    +
      +
    • $main_database[0] Main database
    • +
    • $main_database[1] Requests
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{queriesbydatabase_graph} +
    +
    + + + + + + + + + + $query_database_info + +
    DatabaseRequest typeCount
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{queriesbydatabase_graph}; + + } -sub print_connection_report +sub print_query_per_application { - if (exists $connection_info{database}) { - print $fh qq{ -

    Connections per database ^

    - -
    - - - - - - -}; - my $total_count = 0; - foreach my $d (sort keys %{$connection_info{database}}) { - print $fh "\n"; - $total_count += $connection_info{database}{$d}; - foreach my $u (sort keys %{$connection_info{user}}) { - next if (!exists $connection_info{database_user}{$d}{$u}); - print $fh "\n"; + my %infos = (); + my $total_count = 0; + my $query_application_info = ''; + my @main_application = ('unknown', 0); + foreach my $d (sort keys %application_info) { + $query_application_info .= ""; + $total_count += $application_info{$d}{count}; + if ($main_application[1] < $application_info{$d}{count}) { + $main_application[0] = $d; + $main_application[1] = $application_info{$d}{count}; + } + foreach my $r (sort keys %{$application_info{$d}}) { + next if ($r eq 'count'); + $query_application_info .= ""; + } + } + $query_application_info = qq{} if (!$total_count); + if ($graph) { + my @small = (); + foreach my $d (sort keys %application_info) { + if ((($application_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { + $infos{$d} = $application_info{$d}{count} || 0; + } else { + $infos{"Sum queries per applications < $pie_percentage_limit%"} += $application_info{$d}{count} || 0; + push(@small, $d); } } - print $fh "
    DatabaseUserCount
    $d", - &comma_numbers($connection_info{database}{$d}), "
    $u", - &comma_numbers($connection_info{database_user}{$d}{$u}), "
    $dTotal" . + &comma_numbers($application_info{$d}{count}) . "
    $r" . + &comma_numbers($application_info{$d}{$r}) . "
    $NODATA
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %{$connection_info{database}}) { - if ((($connection_info{database}{$d} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $connection_info{database}{$d} || 0; - } else { - $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{database}{$d} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; - delete $infos{"Sum connections < $pie_percentage_limit%"}; - } - &flotr2_piegraph($graphid++, 'databaseconnections_graph', 'Connections per database', %infos); + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum queries per applications < $pie_percentage_limit%"}; + delete $infos{"Sum queries per applications < $pie_percentage_limit%"}; } - print $fh "
    \n"; } + $drawn_graphs{queriesbyapplication_graph} = &flotr2_piegraph($graphid++, 'queriesbyapplication_graph', 'Queries per application', %infos); - # Show connection per user statistics - if (exists $connection_info{user}) { - print $fh qq{ -

    Connections per user ^

    - -\n"; - $total_count += $connection_info{user}{$u}; - $c++; - } - print $fh "
    - - - - - + $main_application[1] = &comma_numbers($main_application[1]); + print $fh qq{ +
    +

    Queries by application

    +
    +

    Key values

    +
    +
      +
    • $main_application[0] Main application
    • +
    • $main_application[1] Requests
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{queriesbyapplication_graph} +
    +
    +
    UserCount
    + + + + + + + + + $query_application_info + +
ApplicationRequest typeCount
    + + + + + }; + delete $drawn_graphs{queriesbyapplication_graph}; - my $total_count = 0; - my $c = 0; - foreach my $u (sort keys %{$connection_info{user}}) { - my $colb = $c % 2; - print $fh "
    $u", &comma_numbers($connection_info{user}{$u}), - "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %{$connection_info{user}}) { - if ((($connection_info{user}{$d} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $connection_info{user}{$d} || 0; - } else { - $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{user}{$d} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; - delete $infos{"Sum connections < $pie_percentage_limit%"}; +} + +sub print_query_per_user +{ + my %infos = (); + my $total_count = 0; + my $query_user_info = ''; + my @main_user = ('unknown', 0); + foreach my $d (sort keys %user_info) { + $query_user_info .= "$dTotal" . + &comma_numbers($user_info{$d}{count}) . ""; + $total_count += $user_info{$d}{count}; + if ($main_user[1] < $user_info{$d}{count}) { + $main_user[0] = $d; + $main_user[1] = $user_info{$d}{count}; + } + foreach my $r (sort keys %{$user_info{$d}}) { + next if ($r eq 'count'); + $query_user_info .= "$r" . + &comma_numbers($user_info{$d}{$r}) . ""; + } + } + $query_user_info = qq{$NODATA} if (!$total_count); + + if ($graph) { + my @small = (); + foreach my $d (sort keys %user_info) { + if ((($user_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { + $infos{$d} = $user_info{$d}{count} || 0; + } else { + $infos{"Sum queries per users < $pie_percentage_limit%"} += $user_info{$d}{count} || 0; + push(@small, $d); } - &flotr2_piegraph($graphid++, 'userconnections_graph', 'Connections per user', %infos); } - print $fh "\n"; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum queries per users < $pie_percentage_limit%"}; + delete $infos{"Sum queries per users < $pie_percentage_limit%"}; + } } + $drawn_graphs{queriesbyuser_graph} = &flotr2_piegraph($graphid++, 'queriesbyuser_graph', 'Queries per user', %infos); - # Show connection per host statistics - if (exists $connection_info{host}) { - print $fh qq{ -

    Connections per host ^

    - -\n"; - $total_count += $connection_info{host}{$h}; - $c++; - } - print $fh "
    - - - - - + $main_user[1] = &comma_numbers($main_user[1]); + print $fh qq{ +
    +

    Queries by user

    +
    +

    Key values

    +
    +
      +
    • $main_user[0] Main user
    • +
    • $main_user[1] Requests
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{queriesbyuser_graph} +
    +
    +
    HostCount
    + + + + + + + + + $query_user_info + +
    UserRequest typeCount
    + + + + + }; + delete $drawn_graphs{queriesbyuser_graph}; - my $total_count = 0; - my $c = 0; - foreach my $h (sort keys %{$connection_info{host}}) { - my $colb = $c % 2; - print $fh "
    $h", &comma_numbers($connection_info{host}{$h}), - "
    \n"; - if ($graph && $total_count) { - my %infos = (); - my @small = (); - foreach my $d (sort keys %{$connection_info{host}}) { - if ((($connection_info{host}{$d} * 100) / $total_count) > $pie_percentage_limit) { - $infos{$d} = $connection_info{host}{$d} || 0; - } else { - $infos{"Sum connections < $pie_percentage_limit%"} += $connection_info{host}{$d} || 0; - push(@small, $d); - } - } - if ($#small == 0) { - $infos{$small[0]} = $infos{"Sum connections < $pie_percentage_limit%"}; - delete $infos{"Sum connections < $pie_percentage_limit%"}; +} + +sub print_query_per_host +{ + my %infos = (); + my $total_count = 0; + my $query_host_info = ''; + my @main_host = ('unknown', 0); + foreach my $d (sort keys %host_info) { + $query_host_info .= "$dTotal" . + &comma_numbers($host_info{$d}{count}) . ""; + $total_count += $host_info{$d}{count}; + if ($main_host[1] < $host_info{$d}{count}) { + $main_host[0] = $d; + $main_host[1] = $host_info{$d}{count}; + } + foreach my $r (sort keys %{$host_info{$d}}) { + next if ($r eq 'count'); + $query_host_info .= "$r" . + &comma_numbers($host_info{$d}{$r}) . ""; + } + } + $query_host_info = qq{$NODATA} if (!$total_count); + + if ($graph) { + my @small = (); + foreach my $d (sort keys %host_info) { + if ((($host_info{$d}{count} * 100) / ($total_count || 1)) > $pie_percentage_limit) { + $infos{$d} = $host_info{$d}{count} || 0; + } else { + $infos{"Sum queries per hosts < $pie_percentage_limit%"} += $host_info{$d}{count} || 0; + push(@small, $d); } - &flotr2_piegraph($graphid++, 'hostconnections_graph', 'Connections per host', %infos); } - print $fh "\n"; + if ($#small == 0) { + $infos{$small[0]} = $infos{"Sum queries per hosts < $pie_percentage_limit%"}; + delete $infos{"Sum queries per hosts < $pie_percentage_limit%"}; + } } + $drawn_graphs{queriesbyhost_graph} = &flotr2_piegraph($graphid++, 'queriesbyhost_graph', 'Queries per host', %infos); + + $main_host[1] = &comma_numbers($main_host[1]); + print $fh qq{ +
    +

    Queries by host

    +
    +

    Key values

    +
    +
      +
    • $main_host[0] Main host
    • +
    • $main_host[1] Requests
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{queriesbyhost_graph} +
    +
    + + + + + + + + + + $query_host_info + +
    HostRequest typeCount
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{queriesbyhost_graph}; } + sub print_lock_queries_report { my @top_locked_queries; @@ -3909,81 +5268,143 @@ sub print_lock_queries_report # Most frequent waiting queries (N) @top_locked_queries = sort {$b->[2] <=> $a->[2]} @top_locked_queries; print $fh qq{ -

    Most frequent waiting queries (N)^

    - - - - - - - - +
    +

    Most frequent waiting queries (N)

    +
    +
    RankCountTotal wait time (s)Min/Max/Avg duration (s)Query
    + + + + + + + + + + + + }; - my $idx = 1; + + my $rank = 1; for (my $i = 0 ; $i <= $#top_locked_queries ; $i++) { - last if ($i > $end_top); - my $col = $i % 2; - print $fh " + + + + + + + + +}; } - print $fh "\n"; - $idx++; + $rank++; + } + if ($#top_locked_queries == -1) { + print $fh qq{}; } - print $fh "
    RankCountTotal timeMin timeMax timeAvg durationQuery
    ", $i + 1, "", - $top_locked_queries[$i]->[1], "", &convert_time($top_locked_queries[$i]->[2]), - "", &convert_time($top_locked_queries[$i]->[3]), "/", &convert_time($top_locked_queries[$i]->[4]), "/", - &convert_time(($top_locked_queries[$i]->[4] / $top_locked_queries[$i]->[1])), - "
    ", - &highlight_code($top_locked_queries[$i]->[0]), "
    \n"; + my $count = &comma_numbers($top_locked_queries[$i]->[1]); + my $total_time = &convert_time($top_locked_queries[$i]->[2]); + my $min_time = &convert_time($top_locked_queries[$i]->[3]); + my $max_time = &convert_time($top_locked_queries[$i]->[4]); + my $avg_time = &convert_time($top_locked_queries[$i]->[4] / ($top_locked_queries[$i]->[1] || 1)); + my $query = &highlight_code($top_locked_queries[$i]->[0]); my $k = $top_locked_queries[$i]->[0]; + my $example = qq{

    }; + $example = '' if (scalar keys %{$normalyzed_info{$k}{samples}} <= 1); + print $fh qq{ +
    $rank$count$total_time$min_time$max_time$avg_time +
    $query
    + $example + +
    +
    +}; if ($normalyzed_info{$k}{count} > 1) { - print $fh -"
    "; - my $j = 0; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - my $colb = $j % 2; - my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); - $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); - $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); - $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); - $db =~ s/^, / - /; - print $fh -"
    ", - &convert_time($d), " | ", &highlight_code($normalyzed_info{$k}{samples}{$d}{query}), "
    "; - $j++; + $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); + my $details = "[ Date: $normalyzed_info{$k}{samples}{$d}{date}"; + $details .= " - Duration: " . &convert_time($d) if ($normalyzed_info{$k}{samples}{$d}{duration}); + $details .= " - Database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); + $details .= " - User: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); + $details .= " - Remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); + $details .= " - Application: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); + $details .= " ]"; + print $fh qq{ +
    $query
    +
    $details
    +}; + } - print $fh "
    "; + print $fh qq{ +
    +

    +
    + +
    $NODATA
    \n"; + print $fh qq{ + + + + +}; + @top_locked_queries = (); # Queries that waited the most @top_locked_info = sort {$b->[1] <=> $a->[1]} @top_locked_info; print $fh qq{ -

    Queries that waited the most^

    - - - - - - +
    +

    Queries that waited the most

    +
    +
    RankWait time (s)Query
    + + + + + + + + }; + + $rank = 1; for (my $i = 0 ; $i <= $#top_locked_info ; $i++) { - my $col = $i % 2; - my $ttl = $top_locked_info[$i]->[1] || ''; - my $db = " - database: $top_locked_info[$i]->[3]" if ($top_locked_info[$i]->[3]); - $db .= ", user: $top_locked_info[$i]->[4]" if ($top_locked_info[$i]->[4]); - $db .= ", remote: $top_locked_info[$i]->[5]" if ($top_locked_info[$i]->[5]); - $db .= ", app: $top_locked_info[$i]->[6]" if ($top_locked_info[$i]->[6]); - $db =~ s/^, / - /; - print $fh "\n"; - } - print $fh "
    RankWait timeQuery
    ", $i + 1, "", - &convert_time($top_locked_info[$i]->[0]), - "
    ", - &highlight_code($top_locked_info[$i]->[2]), "
    \n"; + my $query = &highlight_code($top_locked_info[$i]->[2]); + my $details = "[ Date: " . ($top_locked_info[$i]->[1] || ''); + $details .= " - Database: $top_locked_info[$i]->[3]" if ($top_locked_info[$i]->[3]); + $details .= " - User: $top_locked_info[$i]->[4]" if ($top_locked_info[$i]->[4]); + $details .= " - Remote: $top_locked_info[$i]->[5]" if ($top_locked_info[$i]->[5]); + $details .= " - Application: $top_locked_info[$i]->[6]" if ($top_locked_info[$i]->[6]); + $details .= " ]"; + my $time = &convert_time($top_locked_info[$i]->[0]); + print $fh qq{ + + $rank + $time + +
    $query
    +
    $details
    + + +}; + $rank++; + } + if ($#top_locked_info == -1) { + print $fh qq{$NODATA}; + } + print $fh qq{ + + + + +}; + } sub print_tempfile_report { - my @top_temporary = ();; + my @top_temporary = (); foreach my $h (keys %normalyzed_info) { if (exists($normalyzed_info{$h}{tempfiles})) { push (@top_temporary, [$h, $normalyzed_info{$h}{tempfiles}{count}, $normalyzed_info{$h}{tempfiles}{size}, @@ -3995,334 +5416,549 @@ sub print_tempfile_report if ($#top_temporary >= 0) { @top_temporary = sort {$b->[1] <=> $a->[1]} @top_temporary; print $fh qq{ -

    Queries generating the most temporary files (N)^

    - - - - - - - - +
    +

    Queries generating the most temporary files (N)

    +
    +
    RankCountTotal sizeMin/Max/Avg sizeQuery
    + + + + + + + + + + + + }; - my $idx = 1; + my $rank = 1; for (my $i = 0 ; $i <= $#top_temporary ; $i++) { - last if ($i > $end_top); - my $col = $i % 2; - print $fh " + + + + + + + + +}; } - print $fh "\n"; - $idx++; + $rank++; } - print $fh "
    RankCountTotal sizeMin sizeMax sizeAvg sizeQuery
    ", $i + 1, "", - $top_temporary[$i]->[1], "", &comma_numbers($top_temporary[$i]->[2]), - "", &comma_numbers($top_temporary[$i]->[3]), - "/", &comma_numbers($top_temporary[$i]->[4]), "/", - &comma_numbers(sprintf("%.2f", $top_temporary[$i]->[2] / $top_temporary[$i]->[1])), - "
    ", - &highlight_code($top_temporary[$i]->[0]), "
    "; + my $count = &comma_numbers($top_temporary[$i]->[1]); + my $total_size = &pretty_print_size($top_temporary[$i]->[2]); + my $min_size = &pretty_print_size($top_temporary[$i]->[3]); + my $max_size = &pretty_print_size($top_temporary[$i]->[4]); + my $avg_size = &pretty_print_size($top_temporary[$i]->[2] / ($top_temporary[$i]->[1] || 1)); + my $query = &highlight_code($top_temporary[$i]->[0]); + my $example = qq{

    }; + $example = '' if ($count <= 1); + print $fh qq{ +
    $rank$count$total_size$min_size$max_size$avg_size +
    $query
    + $example + +
    +
    +}; my $k = $top_temporary[$i]->[0]; if ($normalyzed_info{$k}{count} > 1) { - print $fh "
    "; - my $i = 0; foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - my $colb = $i % 2; - my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); - $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); - $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); - $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); - $db =~ s/^, / - /; - print $fh "
    ", - &convert_time($d), " | ", &highlight_code($normalyzed_info{$k}{samples}{$d}{query}), "
    "; - $i++; + $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); + my $details = "Duration: " . &convert_time($d) . "
    "; + $details .= "Database: $normalyzed_info{$k}{samples}{$d}{db}
    " if ($normalyzed_info{$k}{samples}{$d}{db}); + $details .= "User: $normalyzed_info{$k}{samples}{$d}{user}
    " if ($normalyzed_info{$k}{samples}{$d}{user}); + $details .= "Remote: $normalyzed_info{$k}{samples}{$d}{remote}
    " if ($normalyzed_info{$k}{samples}{$d}{remote}); + $details .= "Application: $normalyzed_info{$k}{samples}{$d}{app}
    " if ($normalyzed_info{$k}{samples}{$d}{app}); + print $fh qq{ +
    $query
    +
    $details
    +}; + } - print $fh "
    "; + print $fh qq{ +
    +

    +
    + +
    \n"; + print $fh qq{ + + + + +}; + @top_temporary = (); } + # Top queries generating the largest temporary files if ($#top_tempfile_info >= 0) { - @top_tempfile_info = sort {$b->[1] <=> $a->[1]} @top_tempfile_info; - + @top_tempfile_info = sort {$b->[0] <=> $a->[0]} @top_tempfile_info; + my $largest = &comma_numbers($top_temporary[0]->[0]); print $fh qq{ -

    Queries generating the largest temporary files^

    - - - - - - +
    +

    Queries generating the largest temporary files

    +
    +
    RankSizeQuery
    + + + + + + + + }; + my $rank = 1; for (my $i = 0 ; $i <= $#top_tempfile_info ; $i++) { - my $col = $i % 2; - my $ttl = $top_tempfile_info[$i]->[1] || ''; - my $db = " - database: $top_tempfile_info[$i]->[3]" if ($top_tempfile_info[$i]->[3]); - $db .= ", user: $top_tempfile_info[$i]->[4]" if ($top_tempfile_info[$i]->[4]); - $db .= ", remote: $top_tempfile_info[$i]->[5]" if ($top_tempfile_info[$i]->[5]); - $db .= ", app: $top_tempfile_info[$i]->[6]" if ($top_tempfile_info[$i]->[6]); - $db =~ s/^, / - /; - print $fh "\n"; + my $size = &pretty_print_size($top_tempfile_info[$i]->[0]); + my $details = "[ Date: $top_tempfile_info[$i]->[1]"; + $details .= " - Database: $top_tempfile_info[$i]->[3]" if ($top_tempfile_info[$i]->[3]); + $details .= " - User: $top_tempfile_info[$i]->[4]" if ($top_tempfile_info[$i]->[4]); + $details .= " - Remote: $top_tempfile_info[$i]->[5]" if ($top_tempfile_info[$i]->[5]); + $details .= " - Application: $top_tempfile_info[$i]->[6]" if ($top_tempfile_info[$i]->[6]); + $details .= " ]"; + my $query = &highlight_code($top_tempfile_info[$i]->[2]); + print $fh qq{ + + + + + +}; + $rank++; } - print $fh "
    RankSizeQuery
    ", $i + 1, "", - &comma_numbers($top_tempfile_info[$i]->[0]), - "
    ", - &highlight_code($top_tempfile_info[$i]->[2]), "
    $rank$size +
    $query
    +
    $details
    +
    \n"; + print $fh qq{ + + + + +}; @top_tempfile_info = (); } } -sub print_top_reports +sub print_slowest_individual_queries { - if ($#top_slowest >= 0) { + + print $fh qq{ +

    Top Queries

    +
    +

Slowest individual queries

    +
    + + + + + + + + + +}; + + for (my $i = 0 ; $i <= $#top_slowest ; $i++) { + my $rank = $i + 1; + my $duration = &convert_time($top_slowest[$i]->[0]); + my $date = $top_slowest[$i]->[1] || ''; + my $details = "[ Date: " . ($top_slowest[$i]->[1] || ''); + $details .= " - Database: $top_slowest[$i]->[3]" if ($top_slowest[$i]->[3]); + $details .= " - User: $top_slowest[$i]->[4]" if ($top_slowest[$i]->[4]); + $details .= " - Remote: $top_slowest[$i]->[5]" if ($top_slowest[$i]->[5]); + $details .= " - Application: $top_slowest[$i]->[6]" if ($top_slowest[$i]->[6]); + $details .= " ]"; + my $query = &highlight_code($top_slowest[$i]->[2]); print $fh qq{ -

    Slowest queries ^

    -
    RankDurationQuery
    - - - - - - + + + + + + + }; + } + if ($#top_slowest == -1) { + print $fh qq{}; + } + print $fh qq{ + +
    RankDuration (s)Query
    $rank$duration +
    $query
    +
    $details
    +
    $NODATA
    +
    +
    }; - for (my $i = 0 ; $i <= $#top_slowest ; $i++) { - my $col = $i % 2; - my $ttl = $top_slowest[$i]->[1] || ''; - my $db = " - database: $top_slowest[$i]->[3]" if ($top_slowest[$i]->[3]); - $db .= ", user: $top_slowest[$i]->[4]" if ($top_slowest[$i]->[4]); - $db .= ", remote: $top_slowest[$i]->[5]" if ($top_slowest[$i]->[5]); - $db .= ", app: $top_slowest[$i]->[6]" if ($top_slowest[$i]->[6]); - $db =~ s/^, / - /; - print $fh "", $i + 1, "", - &convert_time($top_slowest[$i]->[0]), - "
    ", - &highlight_code($top_slowest[$i]->[2]), "
    \n"; - } - print $fh "\n"; +} + +sub print_time_consuming +{ + print $fh qq{ +
    +

    Time consuming queries

    +
    + + + + + + + + + + + + + +}; + my $rank = 1; + foreach my $k (sort {$normalyzed_info{$b}{duration} <=> $normalyzed_info{$a}{duration}} keys %normalyzed_info) { + next if (!$normalyzed_info{$k}{count}); + last if ($rank > $top); + $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; + my $duration = &convert_time($normalyzed_info{$k}{duration}); + my $count = &comma_numbers($normalyzed_info{$k}{count}); + my $min = &convert_time($normalyzed_info{$k}{min}); + my $max = &convert_time($normalyzed_info{$k}{max}); + my $avg = &convert_time($normalyzed_info{$k}{average}); + my $query = &highlight_code($k); print $fh qq{ -

    Queries that took up the most time (N) ^

    -
    RankTotal durationTimes executedMin durationMax durationAvg durationQuery
    - - - - - - - - + + + + + + + + \n"; - $idx++; + foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { + my $details = "[ Date: $normalyzed_info{$k}{samples}{$d}{date}"; + $details .= " - Duration: " . &convert_time($d); + $details .= " - Database: $normalyzed_info{$k}{samples}{$d}{details}" if ($normalyzed_info{$k}{samples}{$d}{details}); + $details .= " - User: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); + $details .= " - Remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); + $details .= " - Application: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); + $details .= " ]"; + $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); + print $fh qq{ +
    $query
    +
    $details
    +}; } - print $fh "
    RankTotal durationTimes executedMin/Max/Avg duration (s)Query
    $rank$duration$count +

    Details

    +
    $min$max$avg +
    $query
    + +
    +

    Times Reported Time consuming queries #$rank

    + + + + + + + + + + + }; - my $idx = 1; - foreach my $k (sort {$normalyzed_info{$b}{duration} <=> $normalyzed_info{$a}{duration}} keys %normalyzed_info) { - next if (!$normalyzed_info{$k}{count}); - last if ($idx > $top); - my $q = $k; - if ($normalyzed_info{$k}{count} == 1) { - foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - $q = $normalyzed_info{$k}{samples}{$d}{query}; - last; - } - } - $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; - my $col = $idx % 2; - print $fh ""; - print $fh " +
    DayHourCountDurationAvg duration
    $idx", - &convert_time($normalyzed_info{$k}{duration}), - "
    ", - &comma_numbers($normalyzed_info{$k}{count}), -"
    "; - foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { - my $c = 1; - $d =~ /^\d{4}(\d{2})(\d{2})$/; - my $zday = "$abbr_month{$1} $2"; - foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { - $normalyzed_info{$k}{chronos}{$d}{$h}{average} = - $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; - my $colb = $c % 2; - $zday = " " if ($c > 1); - print $fh ""; - $c++; - } + my $details = ''; + foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { + $normalyzed_info{$k}{chronos}{$d}{$h}{average} = + $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / ($normalyzed_info{$k}{chronos}{$d}{$h}{count} || 1); + $details .= ""; + $zday = ""; } - print $fh "
    DayHourCountDurationAvg Duration
    $zday$h", - &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}), "", - &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}), "", - &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}), "
    $zday$h" . + &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}) . "" . + &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}) . "" . + &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}) . "
    ", &convert_time($normalyzed_info{$k}{min}),"/", &convert_time($normalyzed_info{$k}{max}),"/", &convert_time($normalyzed_info{$k}{average}), - "
    ", - &highlight_code($q), "
    "; + } + print $fh qq{ + $details +
    +

    +
    +

    + +
    +
    +}; - if ($normalyzed_info{$k}{count} > 1) { - print $fh -"
    "; - my $i = 0; - foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - my $colb = $i % 2; - my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); - $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); - $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); - $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); - $db =~ s/^, / - /; - print $fh -"
    ", - &convert_time($d), " | ", &highlight_code($normalyzed_info{$k}{samples}{$d}{query}), "
    "; - $i++; - } - print $fh "
    "; - } - print $fh "
    \n"; + print $fh qq{ + +

    +
    + + + +}; + $rank++; + } + + if (scalar keys %normalyzed_info == 0) { + print $fh qq{$NODATA}; } + print $fh qq{ + + +
    + +}; - if (scalar keys %normalyzed_info > 0) { +} - print $fh qq{ -

    Most frequent queries (N) ^

    - - - - - - - - +sub print_most_frequent +{ + print $fh qq{ +
    +

    Most frequent queries (N)

    +
    +
    RankTimes executedTotal durationMin/Max/Avg duration (s)Query
    + + + + + + + + + + + + }; - my $idx = 1; - foreach my $k (sort {$normalyzed_info{$b}{count} <=> $normalyzed_info{$a}{count}} keys %normalyzed_info) { - next if (!$normalyzed_info{$k}{count}); - last if ($idx > $top); - my $q = $k; - if ($normalyzed_info{$k}{count} == 1) { - foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - $q = $normalyzed_info{$k}{samples}{$d}{query}; - last; - } - } - my $col = $idx % 2; - print $fh -"\n"; - $idx++; + foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { + my $details = "[ Date: $normalyzed_info{$k}{samples}{$d}{date}"; + $details .= " - Duration: " . &convert_time($d); + $details .= " - Database: $normalyzed_info{$k}{samples}{$d}{details}" if ($normalyzed_info{$k}{samples}{$d}{details}); + $details .= " - User: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); + $details .= " - Remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); + $details .= " - Application: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); + $details .= " ]"; + $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); + print $fh qq{ +
    $query
    +
    $details
    +}; } - print $fh "
    RankTimes executedTotal durationMin durationMax durationAvg durationQuery
    $idx
    ", - &comma_numbers($normalyzed_info{$k}{count}), -"
    "; - foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { - my $c = 1; - $d =~ /^\d{4}(\d{2})(\d{2})$/; - my $zday = "$abbr_month{$1} $2"; - foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { - $normalyzed_info{$k}{chronos}{$d}{$h}{average} = - $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; - my $colb = $c % 2; - $zday = " " if ($c > 1); - print $fh ""; - $c++; - } + my $rank = 1; + foreach my $k (sort {$normalyzed_info{$b}{count} <=> $normalyzed_info{$a}{count}} keys %normalyzed_info) { + next if (!$normalyzed_info{$k}{count}); + last if ($rank > $top); + $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; + my $duration = &convert_time($normalyzed_info{$k}{duration}); + my $count = &comma_numbers($normalyzed_info{$k}{count}); + my $min = &convert_time($normalyzed_info{$k}{min}); + my $max = &convert_time($normalyzed_info{$k}{max}); + my $avg = &convert_time($normalyzed_info{$k}{average}); + my $query = &highlight_code($k); + print $fh qq{ + + + + + + + + "; - print $fh " +
    DayHourCountDurationAvg Duration
    $zday$h", - &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}), "", - &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}), "", - &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}), "
    $rank$count +

    Details

    +
    $duration$min$max$avg +
    $query
    + +
    +

Times Reported Most frequent queries #$rank

    + + + + + + + + + + +}; + my $details = ''; + foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { + $normalyzed_info{$k}{chronos}{$d}{$h}{average} = + $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; + $details .= ""; + $zday = ""; } - print $fh "
DayHourCountDurationAvg duration
    $zday$h" . + &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}) . "" . + &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}) . "" . + &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}) . "
    ", &convert_time($normalyzed_info{$k}{duration}), "",&convert_time($normalyzed_info{$k}{min}),"/",&convert_time($normalyzed_info{$k}{max}),"/", - &convert_time($normalyzed_info{$k}{average}), "
    ", - &highlight_code($q), "
    "; + } + print $fh qq{ + $details +
    +

    +
    +

    + +
    +
    +}; - if ($normalyzed_info{$k}{count} > 1) { - print $fh -"
    "; - my $i = 0; - foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - my $colb = $i % 2; - my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); - $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); - $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); - $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); - $db =~ s/^, / - /; - print $fh -"
    ", - &convert_time($d), " | ", &highlight_code($normalyzed_info{$k}{samples}{$d}{query}), "
    "; - $i++; - } - print $fh "
    "; - } - print $fh "
    \n"; + print $fh qq{ + +

    + + + + +}; + $rank++; } + if (scalar keys %normalyzed_info == 0) { + print $fh qq{$NODATA}; + } + print $fh qq{ + + + + +}; - if ($#top_slowest >= 0) { - print $fh qq{ -

    Slowest queries (N) ^

    - - - - - - - - - +} + + +sub print_slowest_queries +{ + print $fh qq{ +
    +

    Normalized slowest queries (N)

    +
    +
    RankMin/Max/Avg duration (s)Times executedTotal durationQuery
    + + + + + + + + + + + + }; - my $idx = 1; - foreach my $k (sort {$normalyzed_info{$b}{average} <=> $normalyzed_info{$a}{average}} keys %normalyzed_info) { - next if (!$k || !$normalyzed_info{$k}{count}); - last if ($idx > $top); - my $q = $k; - if ($normalyzed_info{$k}{count} == 1) { - foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - $q = $normalyzed_info{$k}{samples}{$d}{query}; - last; - } - } - my $col = $idx % 2; - print $fh ""; - print $fh " + + + + + + + +
    RankMin durationMax durationAvg durationTimes executedTotal durationQuery
    $idx", - &convert_time($normalyzed_info{$k}{min}), "/", - &convert_time($normalyzed_info{$k}{max}), "/", - &convert_time($normalyzed_info{$k}{average}), - "
    ", - &comma_numbers($normalyzed_info{$k}{count}), -"
    "; - foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { - my $c = 1; - $d =~ /^\d{4}(\d{2})(\d{2})$/; - my $zday = "$abbr_month{$1} $2"; - foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { - $normalyzed_info{$k}{chronos}{$d}{$h}{average} = - $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; - my $colb = $c % 2; - $zday = " " if ($c > 1); - print $fh ""; - $c++; - } - } - print $fh "
    DayHourCountDurationAvg Duration
    $zday$h", - &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}), "", - &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}), "", - &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}), "
    ", &convert_time($normalyzed_info{$k}{duration}), - "
    ", - &highlight_code($q), "
    "; - if ($normalyzed_info{$k}{count} > 1) { - print $fh -"
    "; - my $i = 0; - foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { - my $colb = $i % 2; - my $db = " - database: $normalyzed_info{$k}{samples}{$d}{db}" if ($normalyzed_info{$k}{samples}{$d}{db}); - $db .= ", user: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); - $db .= ", remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); - $db .= ", app: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); - $db =~ s/^, / - /; - print $fh -"
    ", - &convert_time($d), " | ", &highlight_code($normalyzed_info{$k}{samples}{$d}{query}), "
    "; - $i++; - } - print $fh "
    "; + my $rank = 1; + foreach my $k (sort {$normalyzed_info{$b}{average} <=> $normalyzed_info{$a}{average}} keys %normalyzed_info) { + next if (!$k || !$normalyzed_info{$k}{count}); + last if ($rank > $top); + $normalyzed_info{$k}{average} = $normalyzed_info{$k}{duration} / $normalyzed_info{$k}{count}; + my $duration = &convert_time($normalyzed_info{$k}{duration}); + my $count = &comma_numbers($normalyzed_info{$k}{count}); + my $min = &convert_time($normalyzed_info{$k}{min}); + my $max = &convert_time($normalyzed_info{$k}{max}); + my $avg = &convert_time($normalyzed_info{$k}{average}); + my $query = &highlight_code($k); + print $fh qq{ +
    $rank$min$max$avg$count +

    Details

    +
    $duration +
    $query
    + +
    +

Times Reported Normalized slowest queries #$rank

    + + + + + + + + + + + +}; + my $details = ''; + foreach my $d (sort keys %{$normalyzed_info{$k}{chronos}}) { + my $c = 1; + $d =~ /^\d{4}(\d{2})(\d{2})$/; + my $zday = "$abbr_month{$1} $2"; + foreach my $h (sort keys %{$normalyzed_info{$k}{chronos}{$d}}) { + $normalyzed_info{$k}{chronos}{$d}{$h}{average} = + $normalyzed_info{$k}{chronos}{$d}{$h}{duration} / $normalyzed_info{$k}{chronos}{$d}{$h}{count}; + $details .= ""; + $zday = ""; } - print $fh "\n"; - $idx++; } - print $fh "
    DayHourCountDurationAvg duration
    $zday$h" . + &comma_numbers($normalyzed_info{$k}{chronos}{$d}{$h}{count}) . "" . + &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{duration}) . "" . + &convert_time($normalyzed_info{$k}{chronos}{$d}{$h}{average}) . "
    \n"; + print $fh qq{ + $details +
    +

    + +

    + +
    +
    +}; + + foreach my $d (sort {$b <=> $a} keys %{$normalyzed_info{$k}{samples}}) { + my $details = "[ Date: $normalyzed_info{$k}{samples}{$d}{date}"; + $details .= " - Duration: " . &convert_time($d); + $details .= " - Database: $normalyzed_info{$k}{samples}{$d}{details}" if ($normalyzed_info{$k}{samples}{$d}{details}); + $details .= " - User: $normalyzed_info{$k}{samples}{$d}{user}" if ($normalyzed_info{$k}{samples}{$d}{user}); + $details .= " - Remote: $normalyzed_info{$k}{samples}{$d}{remote}" if ($normalyzed_info{$k}{samples}{$d}{remote}); + $details .= " - Application: $normalyzed_info{$k}{samples}{$d}{app}" if ($normalyzed_info{$k}{samples}{$d}{app}); + $details .= " ]"; + $query = &highlight_code($normalyzed_info{$k}{samples}{$d}{query}); + print $fh qq{ +
    $query
    +
    $details
    +}; + } + print $fh qq{ +
    +

    +
    + + + +}; + $rank++; + } + if (scalar keys %normalyzed_info == 0) { + print $fh qq{$NODATA}; } + print $fh qq{ + + + + +}; + } sub dump_as_html @@ -4331,115 +5967,180 @@ sub dump_as_html # Dump the html header &html_header(); - # Global information - &print_global_information(); + if (!$error_only) { - # Overall statistics - &print_overall_statistics(); + # Overall statistics + print $fh qq{ +
  • +}; + &print_overall_statistics(); + + # Set graphs limits + $overall_stat{'first_log_ts'} =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; + $t_min = timegm_nocheck(0, $5, $4, $3, $2 - 1, $1) * 1000; + $t_min -= ($avg_minutes * 60000); + + $overall_stat{'last_log_ts'} =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; + $t_max = timegm_nocheck(59, $5, $4, $3, $2 - 1, $1) * 1000; + $t_max += ($avg_minutes * 60000); + + if (!$disable_hourly) { + # Build graphs based on hourly stat + &compute_query_graphs(); + + # Show global SQL traffic + &print_sql_traffic(); + + # Show hourly statistics + &print_general_activity(); + + } + + if (!$disable_connection) { + print $fh qq{ +
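# A worked sketch of the graph-bound arithmetic above: the first/last log
# timestamps become epoch milliseconds (note the 0-based month expected by
# timegm_nocheck), then the window is padded by one averaging interval on
# each side so the first and last plotted points are not clipped.
use strict;
use warnings;
use Time::Local qw(timegm_nocheck);

my $avg_minutes  = 5;
my $first_log_ts = '2013-10-31 08:57:17';
$first_log_ts =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/;
my $t_min = timegm_nocheck(0, $5, $4, $3, $2 - 1, $1) * 1000;
$t_min -= $avg_minutes * 60000;    # widen the left edge by one interval
print "t_min=$t_min\n";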
  • +
• +}; + + # Draw connections information + &print_simultaneous_connection() if (!$disable_hourly); + + # Show per database/user connections + &print_database_connection() if (exists $connection_info{database}); - # Set graphs limits - $overall_stat{'first_log_ts'} =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; - $t_min = timegm_nocheck(0, $5, $4, $3, $2 - 1, $1) * 1000; - $t_min -= ($avg_minutes * 60000); + # Show per user connections + &print_user_connection() if (exists $connection_info{user}); + + # Show per client IP connections + &print_host_connection() if (exists $connection_info{host}); + } - $overall_stat{'last_log_ts'} =~ /(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)/; - $t_max = timegm_nocheck(59, $5, $4, $3, $2 - 1, $1) * 1000; - $t_max += ($avg_minutes * 60000); - if (!$disable_hourly) { + # Show session per database statistics + if (!$disable_session) { + print $fh qq{ +
  • +
  • +}; + # Show per database sessions + &print_database_session(); + # Show per user sessions + &print_user_session(); + # Show per host sessions + &print_host_session(); + } - # Start creating hourly reports about queries and connections - &print_hourly_reports(); # Display checkpoint and temporary files report - if ($tempfile_info{count} || $checkpoint_info{wbuffer} || $restartpoint_info{wbuffer} || $autovacuum_info{chronos} || $autovacuum_info{count}) { - &print_checkpoint_tempfile_vacuum_reports(); + if (!$disable_checkpoint) { + print $fh qq{ +
  • +
  • + }; + &print_checkpoint(); } - } - %per_minute_info = (); - # Create detailed vacuum reports - &print_vacuum_reports(); + if (!$disable_temporary) { + print $fh qq{ +
  • +
• +}; + # Show temporary files detailed information + &print_temporary_file(); - # INSERT/DELETE/UPDATE/SELECT repartition - &print_query_type_report() if (!$disable_type); + # Show information about queries generating temporary files + &print_tempfile_report(); + } - # Show request per database statistics - &print_request_report() if (!$disable_type); + if (!$disable_autovacuum) { + print $fh qq{ +
  • +
  • +}; + # Show vacuums/analyses detailed informations + &print_vacuum(); - # Lock stats per type - &print_locktype_report() if (!$disable_lock); + } - # Show session per database statistics - &print_session_report() if (!$disable_session); + if (!$disable_lock) { + print $fh qq{ +
  • +
  • +}; + # Lock stats per type + &print_lock_type(); - # Show connection per database statistics - &print_connection_report() if (!$disable_connection); + # Show lock wait detailed informations + &print_lock_queries_report(); + } - # Show lock wait detailed informations - &print_lock_queries_report() if (!$disable_lock && (scalar keys %lock_info > 0)); + %per_minute_info = (); - # Show temporary files detailed informations - &print_tempfile_report() if (!$disable_temporary && (scalar keys %tempfile_info > 0)); + if (!$disable_query) { + print $fh qq{ +
  • +
  • +}; + # INSERT/DELETE/UPDATE/SELECT repartition + if (!$disable_type) { + &print_query_type(); - # Show top information - &print_top_reports() if (!$disable_query); + # Show requests per database + &print_query_per_database(); - # Show errors report - if (!$disable_error) { - &show_error_as_html(); - } + # Show requests per user + &print_query_per_user(); - # Dump the html footer - &html_footer(); + # Show requests per host + &print_query_per_host(); -} + # Show requests per application + &print_query_per_application(); + } -sub dump_error_as_html -{ + print $fh qq{ +
  • +
  • +}; + # Show top information + &print_slowest_individual_queries(); - # Dump the html header - &html_header(); + # Show queries that took up the most time + &print_time_consuming(); + + # Show most frequent queries + &print_most_frequent(); + + # Print normalized slowest queries + &print_slowest_queries + } - # Global information - my $curdate = localtime(time); - my $fmt_nlines = &comma_numbers($nlines); - my $total_time = timestr($td); - $total_time =~ s/^([\.0-9]+) wallclock.*/$1/; - $total_time = &convert_time($total_time * 1000); - my $logfile_str = $log_files[0]; - if ($#log_files > 0) { - $logfile_str .= ', ..., ' . $log_files[-1]; } - print $fh qq{ -
    -
      -
    • Generated on $curdate
    • -
    • Log file: $logfile_str
    • -
    • Parsed $fmt_nlines log entries in $total_time
    • -
    • Log start from $overall_stat{'first_log_ts'} to $overall_stat{'last_log_ts'}
    • -
    -
    + + # Show errors report + if (!$disable_error) { + + if (!$error_only) { + print $fh qq{ +
  • +
  • }; - my $fmt_errors = &comma_numbers($overall_stat{'errors_number'}) || 0; - my $fmt_unique_error = &comma_numbers(scalar keys %{$overall_stat{'unique_normalized_errors'}}) || 0; - print $fh qq{ -
    -

    Overall statistics ^

    -
    -
    -
      -
    • Number of events: $fmt_errors
    • -
    • Number of unique normalized events: $fmt_unique_error
    • -
    -
    -
    + } else { + print $fh qq{ +
  • }; + } + # Show log level distribution + &print_log_level(); + + # Show Most Frequent Errors/Events + &show_error_as_html(); + } - &show_error_as_html(); # Dump the html footer &html_footer(); + } sub escape_html @@ -4449,54 +6150,15 @@ sub escape_html return $_[0]; } -sub show_error_as_html +sub print_log_level { + my %infos = (); - return if (scalar keys %error_info == 0); - - print $fh qq{ -

    Most frequent events (N) ^

- - - - - - - -}; - my $idx = 1; + # Some messages have seen their log level change during log parsing. + # Set the real log level count back foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { next if (!$error_info{$k}{count}); - last if ($idx > $top); - my $col = $idx % 2; - print $fh -
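# A minimal sketch of the level downgrade applied above: a few messages that
# PostgreSQL emits at ERROR level are really informational, so the report
# rewrites the prefix and moves the count from ERROR to LOG.
use strict;
use warnings;

my %logs_type = (ERROR => 2, LOG => 5);
my $msg = 'ERROR: database system was shut down';
if ($msg =~ s/ERROR: (database system was shut down)/LOG: $1/) {
    $logs_type{ERROR}--;
    $logs_type{LOG}++;
}
print "$msg | ERROR=$logs_type{ERROR} LOG=$logs_type{LOG}\n";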
    RankTimes reportedError
    $idx
    ", - &comma_numbers($error_info{$k}{count}), ""; - print $fh "
    "; - foreach my $d (sort keys %{$error_info{$k}{chronos}}) { - my $c = 1; - $d =~ /^\d{4}(\d{2})(\d{2})$/; - my $zday = "$abbr_month{$1} $2"; - foreach my $h (sort keys %{$error_info{$k}{chronos}{$d}}) { - my $colb = $c % 2; - $zday = " " if ($c > 1); - print $fh ""; - $c++; - } - } - print $fh "
    DayHourCount
    $zday$h", - &comma_numbers($error_info{$k}{chronos}{$d}{$h}{count}), "
    $msg
    "; - print $fh -"
    "; for (my $i = 0 ; $i <= $#{$error_info{$k}{date}} ; $i++) { if ( ($error_info{$k}{error}[$i] =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/) || ($error_info{$k}{error}[$i] =~ s/ERROR: (database system was shut down)/LOG: $1/) @@ -4506,22 +6168,7 @@ sub show_error_as_html $logs_type{ERROR}--; $logs_type{LOG}++; } - # Escape HTML code in error message - $error_info{$k}{error}[$i] = &escape_html($error_info{$k}{error}[$i]); - $error_info{$k}{statement}[$i] = &escape_html($error_info{$k}{statement}[$i]); - - my $c = $i % 2; - print $fh "
    $error_info{$k}{error}[$i]
    \n"; - print $fh "
    Detail: $error_info{$k}{detail}[$i]
    \n" - if ($error_info{$k}{detail}[$i]); - print $fh "
    Context: $error_info{$k}{context}[$i]
    \n" - if ($error_info{$k}{context}[$i]); - print $fh "
    Hint: $error_info{$k}{hint}[$i]
    \n" if ($error_info{$k}{hint}[$i]); - print $fh "
    Statement: $error_info{$k}{statement}[$i]
    \n" - if ($error_info{$k}{statement}[$i]); - print $fh "
    Database: $error_info{$k}{db}[$i]
    \n" if ($error_info{$k}{db}[$i]); } - print $fh "
    "; } else { if ( ($error_info{$k}{error}[0] =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/) || ($error_info{$k}{error}[0] =~ s/ERROR: (database system was shut down)/LOG: $1/) @@ -4531,74 +6178,235 @@ sub show_error_as_html $logs_type{ERROR}--; $logs_type{LOG}++; } - # Escape HTML code in error message - $error_info{$k}{error}[0] = &escape_html($error_info{$k}{error}[0]); - $error_info{$k}{statement}[0] = &escape_html($error_info{$k}{statement}[0]); - - print $fh "
    $error_info{$k}{error}[0]
    "; - print $fh "
    Detail: $error_info{$k}{detail}[0]
    \n" if ($error_info{$k}{detail}[0]); - print $fh "
    Context: $error_info{$k}{context}[0]
    \n" if ($error_info{$k}{context}[0]); - print $fh "
    Hint: $error_info{$k}{hint}[0]
    \n" if ($error_info{$k}{hint}[0]); - print $fh "
    Statement: $error_info{$k}{statement}[0]
    \n" - if ($error_info{$k}{statement}[0]); - print $fh "
    Database: $error_info{$k}{db}[0]
    \n" if ($error_info{$k}{db}[0]); - } - print $fh "
    $d" . &comma_numbers($logs_type{$d}) . + "" . sprintf("%0.2f", ($logs_type{$d} * 100) / ($total_logs||1)) . "%
    $NODATA
    \n"; - if (scalar keys %logs_type > 0) { + $total_logs = &comma_numbers($total_logs); + print $fh qq{ +

    Events

    +
    +

    Log levels

    +
    +

    Key values

    +
    +
      +
    • $total_logs Log entries
    • +
    • $logs_type{ERROR} Number of errors
    • +
    +
    +
    +
    +
    + +
    +
    + $drawn_graphs{logstype_graph} +
    +
    + + + + + + + + + + $logtype_info + +
    TypeCountPercentage
    +
    +
    +
    +
    +
    +}; + delete $drawn_graphs{logstype_graph}; - # Show log types - print $fh qq{ -

    Logs per type ^

    - -
    - - - - - - - }; +} - my $total_logs = 0; - foreach my $d (sort keys %logs_type) { - $total_logs += $logs_type{$d}; - } +sub show_error_as_html +{ - my $c = 0; + my $main_error = 0; + my $total = 0; + foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) { + next if (!$error_info{$k}{count}); + $main_error = &comma_numbers($error_info{$k}{count}) if (!$main_error); + $total += $error_info{$k}{count}; + } + $total = &comma_numbers($total); - foreach my $d (sort keys %logs_type) { - next if (!$logs_type{$d}); - my $colb = $c % 2; - print $fh "\n"; - $c++; + print $fh qq{ +
+	print $fh qq{
+<div class="analysis-item" id="most-frequent-errors-events">
+	<h2>Most Frequent Errors/Events</h2>
+	<div class="key-values">
+		<h3>Key values</h3>
+		<ul>
+			<li><b>$main_error</b> Max number of times the same error was reported</li>
+			<li><b>$total</b> Total errors found</li>
+		</ul>
+	</div>
+	<table class="table table-striped">
+		<thead>
+			<tr><th>Rank</th><th>Times reported</th><th>Error</th></tr>
+		</thead>
+		<tbody>
+};
+	my $rank = 1;
+	foreach my $k (sort {$error_info{$b}{count} <=> $error_info{$a}{count}} keys %error_info) {
+		next if (!$error_info{$k}{count});
+		my $count = &comma_numbers($error_info{$k}{count});
+		my $msg = $k;
+		$msg =~ s/ERROR: (parameter "[^"]+" changed to)/LOG: $1/;
+		$msg =~ s/ERROR: (database system was shut down)/LOG: $1/;
+		$msg =~ s/ERROR: (database system was interrupted while in recovery)/LOG: $1/;
+		$msg =~ s/ERROR: (recovery has paused)/LOG: $1/;
+		my $error_level_class = 'text-error';
+		if ($msg =~ /^WARNING: /) {
+			$error_level_class = 'text-warning';
+		} elsif ($msg =~ /^LOG: /) {
+			$error_level_class = 'text-success';
+		} elsif ($msg =~ /^HINT: /) {
+			$error_level_class = 'text-info';
+		} elsif ($msg =~ /^FATAL: /) {
+			$error_level_class = 'text-fatal';
+		} elsif ($msg =~ /^PANIC: /) {
+			$error_level_class = 'text-panic';
+		}
+		# Escape HTML code in error message
+		$msg = &escape_html($msg);
+		print $fh qq{
+		<tr>
+			<td>$rank</td>
+			<td>$count</td>
+			<td class="$error_level_class">
+				<h3>Details</h3>
+				<div class="error-message">$msg</div>
+				<h4>Times Reported Most Frequent Error / Event #$rank</h4>
+				<table class="table table-striped">
+					<thead>
+						<tr><th>Day</th><th>Hour</th><th>Count</th></tr>
+					</thead>
+					<tbody>
+};
+		my $details = '';
+		foreach my $d (sort keys %{$error_info{$k}{chronos}}) {
+			my $c = 1;
+			$d =~ /^\d{4}(\d{2})(\d{2})$/;
+			my $zday = "$abbr_month{$1} $2";
+			foreach my $h (sort keys %{$error_info{$k}{chronos}{$d}}) {
+				$details .= "<tr><td>$zday</td><td>$h</td><td>" .
					&comma_numbers($error_info{$k}{chronos}{$d}{$h}{count}) . "</td></tr>";
+				$zday = "";
+			}
+		}
+		print $fh qq{
+					$details
+					</tbody>
+				</table>
+};
+		for (my $i = 0 ; $i <= $#{$error_info{$k}{date}} ; $i++) {
+			# Escape HTML code in error message
+			my $message = &escape_html($error_info{$k}{error}[$i]);
+			my $details = "Date: " . $error_info{$k}{date}[$i] . "<br>\n";
+			if ($error_info{$k}{detail}[$i]) {
+				$details .= "Detail: " . &escape_html($error_info{$k}{detail}[$i]) . "<br>";
+			}
+			if ($error_info{$k}{context}[$i]) {
+				$details .= "Context: " . &escape_html($error_info{$k}{context}[$i]) . "<br>";
+			}
+			if ($error_info{$k}{hint}[$i]) {
+				$details .= "Hint: " . &escape_html($error_info{$k}{hint}[$i]) . "<br>";
+			}
+			if ($error_info{$k}{statement}[$i]) {
+				$details .= "Statement: " . &escape_html($error_info{$k}{statement}[$i]) . "<br>";
+			}
+			if ($error_info{$k}{db}[$i]) {
+				$details .= "Database: " . $error_info{$k}{db}[$i] . "<br>";
+			}
+			print $fh qq{
+				<div class="error-message">$message</div>
+				<div class="error-details">$details</div>
+};
+		}
-		if ($graph && $total_logs) {
-			my %infos = ();
-			my @small = ();
-			foreach my $d (sort keys %logs_type) {
-				if ((($logs_type{$d} * 100) / $total_logs) > $pie_percentage_limit) {
-					$infos{$d} = $logs_type{$d} || 0;
-				} else {
-					$infos{"Sum log types < $pie_percentage_limit%"} += $logs_type{$d} || 0;
-					push(@small, $d);
-				}
-			}
-			if ($#small == 0) {
-				$infos{$small[0]} = $infos{"Sum log types < $pie_percentage_limit%"};
-				delete $infos{"Sum log types < $pie_percentage_limit%"};
-			}
-			&flotr2_piegraph($graphid++, 'logstype_graph', 'Logs per type', %infos);
-		}
-		print $fh "</table>\n";
+		print $fh qq{
+			</td>
+		</tr>
+};
+		$rank++;
+	}
+	if (scalar keys %error_info == 0) {
+		print $fh qq{<tr><td colspan="3">$NODATA</td></tr>};
+	}
+
+	print $fh qq{
+		</tbody>
+	</table>
+</div>
+};
+}
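The load_stats hunks below merge incremental binary dumps: the per-timestamp
peak counters are simply summed key by key (the patch enumerates each counter
explicitly). A minimal standalone sketch of that merge, with two small sample
hashes standing in for the fd_retrieve() dumps:

    #!/usr/bin/perl
    use strict;
    use warnings;

    my %overall_stat  = (peak => {'2013-10-31 08:00:00' => {query => 10, connection => 2}});
    my %_overall_stat = (peak => {'2013-10-31 08:00:00' => {query => 5},
                                  '2013-10-31 08:01:00' => {query => 7, connection => 1}});

    foreach my $ts (keys %{ $_overall_stat{peak} }) {
        foreach my $counter (keys %{ $_overall_stat{peak}{$ts} }) {
            # Counters for the same timestamp accumulate across dumps
            $overall_stat{peak}{$ts}{$counter} += $_overall_stat{peak}{$ts}{$counter};
        }
    }
    # 08:00:00 now holds query => 15, connection => 2; 08:01:00 is taken as-is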
@@ -4608,13 +6416,15 @@ sub load_stats
 	my $fd = shift;
 
 	my %stats = %{ fd_retrieve($fd) };
 	my %_overall_stat = %{$stats{overall_stat}};
+	my %_overall_checkpoint = %{$stats{overall_checkpoint}};
 	my %_normalyzed_info = %{$stats{normalyzed_info}};
 	my %_error_info = %{$stats{error_info}};
 	my %_connection_info = %{$stats{connection_info}};
 	my %_database_info = %{$stats{database_info}};
 	my %_application_info = %{$stats{application_info}};
+	my %_user_info = %{$stats{user_info}};
+	my %_host_info = %{$stats{host_info}};
 	my %_checkpoint_info = %{$stats{checkpoint_info}};
-	my %_restartpoint_info = %{$stats{restartpoint_info}};
 	my %_session_info = %{$stats{session_info}};
 	my %_tempfile_info = %{$stats{tempfile_info}};
 	my %_logs_type = %{$stats{logs_type}};
@@ -4664,13 +6474,26 @@ sub load_stats
 	$overall_stat{SELECT} += $_overall_stat{SELECT}
 		if exists $_overall_stat{SELECT};
 
-	foreach my $k (keys %{$_overall_stat{query_peak}}) {
-		$overall_stat{query_peak}{$k} += $_overall_stat{query_peak}{$k};
+	$overall_checkpoint{checkpoint_warning} += $_overall_checkpoint{checkpoint_warning};
+	$overall_checkpoint{checkpoint_write} = $_overall_checkpoint{checkpoint_write}
+		if ($_overall_checkpoint{checkpoint_write} > $overall_checkpoint{checkpoint_write});
+	$overall_checkpoint{checkpoint_sync} = $_overall_checkpoint{checkpoint_sync}
+		if ($_overall_checkpoint{checkpoint_sync} > $overall_checkpoint{checkpoint_sync});
+	foreach my $k (keys %{$_overall_stat{peak}}) {
+		$overall_stat{peak}{$k}{query} += $_overall_stat{peak}{$k}{query};
+		$overall_stat{peak}{$k}{select} += $_overall_stat{peak}{$k}{select};
+		$overall_stat{peak}{$k}{write} += $_overall_stat{peak}{$k}{write};
+		$overall_stat{peak}{$k}{connection} += $_overall_stat{peak}{$k}{connection};
+		$overall_stat{peak}{$k}{tempfile_size} += $_overall_stat{peak}{$k}{tempfile_size};
+		$overall_stat{peak}{$k}{tempfile_count} += $_overall_stat{peak}{$k}{tempfile_count};
+	}
+	foreach my $k (keys %{$_overall_checkpoint{peak}}) {
+		$overall_checkpoint{peak}{$k}{checkpoint_wbuffer} += $_overall_checkpoint{peak}{$k}{checkpoint_wbuffer};
+		$overall_checkpoint{peak}{$k}{walfile_usage} += $_overall_checkpoint{peak}{$k}{walfile_usage};
 	}
 
-	# FIXME == $error_info ??
-	foreach my $k (keys %{$_overall_stat{unique_normalized_errors}}) {
-		$overall_stat{unique_normalized_errors}{$k} += $_overall_stat{unique_normalized_errors}{$k};
+	foreach my $k ('prepare', 'bind', 'execute') {
+		$overall_stat{$k} += $_overall_stat{$k};
 	}
 
@@ -4695,6 +6518,23 @@ sub load_stats
 		}
 	}
 
+	### user_info ###
+
+	foreach my $u (keys %_user_info) {
+		foreach my $k (keys %{ $_user_info{$u} }) {
+			$user_info{$u}{$k} += $_user_info{$u}{$k};
+		}
+	}
+
+	### host_info ###
+
+	foreach my $h (keys %_host_info) {
+		foreach my $k (keys %{ $_host_info{$h} }) {
+			$host_info{$h}{$k} += $_host_info{$h}{$k};
+		}
+	}
+
 	### connection_info ###
 
 	foreach my $db (keys %{ $_connection_info{database} }) {
@@ -4799,6 +6639,11 @@ sub load_stats
 					$per_minute_info{$day}{$hour}{$min}{$action}{duration} += $_per_minute_info{$day}{$hour}{$min}{$action}{duration};
 				}
 			}
+			foreach my $k ('prepare', 'bind', 'execute') {
+				if (exists $_per_minute_info{$day}{$hour}{$min}{$k}) {
+					$per_minute_info{$day}{$hour}{$min}{$k} += $_per_minute_info{$day}{$hour}{$min}{$k};
+				}
+			}
 
 			$per_minute_info{$day}{$hour}{$min}{tempfile}{count} += $_per_minute_info{$day}{$hour}{$min}{tempfile}{count}
 				if defined $_per_minute_info{$day}{$hour}{$min}{tempfile}{count};
@@ -4812,12 +6657,13 @@ sub load_stats
 			$per_minute_info{$day}{$hour}{$min}{checkpoint}{total} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{total};
 			$per_minute_info{$day}{$hour}{$min}{checkpoint}{file_added} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{file_added};
 			$per_minute_info{$day}{$hour}{$min}{checkpoint}{write} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{write};
-			$per_minute_info{$day}{$hour}{$min}{restartpoint}{sync} += $_per_minute_info{$day}{$hour}{$min}{restartpoint}{sync};
-			$per_minute_info{$day}{$hour}{$min}{restartpoint}{wbuffer} += $_per_minute_info{$day}{$hour}{$min}{restartpoint}{wbuffer};
-			$per_minute_info{$day}{$hour}{$min}{restartpoint}{total} += $_per_minute_info{$day}{$hour}{$min}{restartpoint}{total};
-			$per_minute_info{$day}{$hour}{$min}{restartpoint}{write} += $_per_minute_info{$day}{$hour}{$min}{restartpoint}{write};
 			$per_minute_info{$day}{$hour}{$min}{autovacuum}{count} += $_per_minute_info{$day}{$hour}{$min}{autovacuum}{count};
 			$per_minute_info{$day}{$hour}{$min}{autoanalyze}{count} += $_per_minute_info{$day}{$hour}{$min}{autoanalyze}{count};
+
+			$per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_files} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_files};
+			$per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_avg} += $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_avg};
+			$per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest} = $_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest}
+				if ($_per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest} > $per_minute_info{$day}{$hour}{$min}{checkpoint}{sync_longest});
 			}
 		}
 	}
@@ -4951,24 +6797,24 @@ sub load_stats
 	my @tmp_top_slowest = sort {$b->[0] <=> $a->[0]} (@top_slowest, @_top_slowest);
 	@top_slowest = ();
 	for (my $i = 0; $i <= $#tmp_top_slowest; $i++) {
-		last if ($i == $end_top);
 		push(@top_slowest, $tmp_top_slowest[$i]);
+		last if ($i == $end_top);
 	}
 
 	### top_locked ###
 	my @tmp_top_locked_info = sort {$b->[0] <=> $a->[0]} (@top_locked_info, @_top_locked_info);
 	@top_locked_info = ();
 	for (my $i = 0; $i <= $#tmp_top_locked_info; $i++) {
-		last if ($i == $end_top);
 		push(@top_locked_info, $tmp_top_locked_info[$i]);
+		last if ($i == $end_top);
 	}
 
 	### top_tempfile ###
 	my @tmp_top_tempfile_info = sort {$b->[0] <=> $a->[0]} (@top_tempfile_info, @_top_tempfile_info);
 	@top_tempfile_info = ();
 	for (my $i = 0; $i <= $#tmp_top_tempfile_info; $i++) {
-		last if ($i == $end_top);
 		push(@top_tempfile_info, $tmp_top_tempfile_info[$i]);
+		last if ($i == $end_top);
 	}
 
 	### checkpoint_info ###
@@ -4980,12 +6826,6 @@ sub load_stats
 	$checkpoint_info{file_added} += $_checkpoint_info{file_added};
 	$checkpoint_info{write} += $_checkpoint_info{write};
 
-	### restartpoint_info ###
-	$restartpoint_info{sync} += $_restartpoint_info{sync};
-	$restartpoint_info{wbuffer} += $_restartpoint_info{wbuffer};
-	$restartpoint_info{total} += $_restartpoint_info{total};
-	$restartpoint_info{write} += $_restartpoint_info{write};
-
 	#### Autovacuum infos ####
 
 	$autovacuum_info{count} += $_autovacuum_info{count};
@@ -5001,7 +6841,11 @@ sub load_stats
 		$autovacuum_info{tables}{$table}{tuples}{removed} += $_autovacuum_info{tables}{$table}{tuples}{removed};
 		$autovacuum_info{tables}{$table}{pages}{removed} += $_autovacuum_info{tables}{$table}{pages}{removed};
 	}
-
+	if ($_autovacuum_info{peak}{system_usage}{elapsed} > $autovacuum_info{peak}{system_usage}{elapsed}) {
+		$autovacuum_info{peak}{system_usage}{elapsed} = $_autovacuum_info{peak}{system_usage}{elapsed};
+		$autovacuum_info{peak}{system_usage}{table} = $_autovacuum_info{peak}{system_usage}{table};
+		$autovacuum_info{peak}{system_usage}{date} = $_autovacuum_info{peak}{system_usage}{date};
+	}
 
 	#### Autoanalyze infos ####
 
 	$autoanalyze_info{count} += $_autoanalyze_info{count};
@@ -5014,6 +6858,11 @@ sub load_stats
 	foreach my $table (keys %{ $_autoanalyze_info{tables} }) {
 		$autoanalyze_info{tables}{$table}{analyzes} += $_autoanalyze_info{tables}{$table}{analyzes};
 	}
+	if ($_autoanalyze_info{peak}{system_usage}{elapsed} > $autoanalyze_info{peak}{system_usage}{elapsed}) {
+		$autoanalyze_info{peak}{system_usage}{elapsed} = $_autoanalyze_info{peak}{system_usage}{elapsed};
+		$autoanalyze_info{peak}{system_usage}{table} = $_autoanalyze_info{peak}{system_usage}{table};
+		$autoanalyze_info{peak}{system_usage}{date} = $_autoanalyze_info{peak}{system_usage}{date};
+	}
 
 	return;
 }
@@ -5024,13 +6873,15 @@ sub dump_as_binary
 
 	store_fd({
 		'overall_stat' => \%overall_stat,
+		'overall_checkpoint' => \%overall_checkpoint,
 		'normalyzed_info' => \%normalyzed_info,
 		'error_info' => \%error_info,
 		'connection_info' => \%connection_info,
 		'database_info' => \%database_info,
 		'application_info' => \%application_info,
+		'user_info' => \%user_info,
+		'host_info' => \%host_info,
 		'checkpoint_info' => \%checkpoint_info,
-		'restartpoint_info' => \%restartpoint_info,
 		'session_info' => \%session_info,
 		'tempfile_info' => \%tempfile_info,
 		'error_info' => \%error_info,
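In the parse_query hunk below, the new peak counters are keyed by a
"yyyy-mm-dd hh:mm:ss" string; that format sorts and compares correctly as a
plain string, which is why lt/gt work on it elsewhere in the patch. A small
sketch with sample values standing in for %prefix_vars:

    #!/usr/bin/perl
    use strict;
    use warnings;

    my %prefix_vars = (t_year => '2013', t_month => '10', t_day => '31',
                       t_hour => '08', t_min => '57', t_sec => '17');
    my $cur_last_log_timestamp = "$prefix_vars{t_year}-$prefix_vars{t_month}-$prefix_vars{t_day} "
                               . "$prefix_vars{t_hour}:$prefix_vars{t_min}:$prefix_vars{t_sec}";
    # Zero-padded ISO notation makes lexical order match chronological order
    print "newer\n" if ('2013-10-31 08:00:00' lt $cur_last_log_timestamp);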
+ "$prefix_vars{t_hour}:$prefix_vars{t_min}:$prefix_vars{t_sec}"; # Stores lock activity if (($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ /acquired ([^\s]+) on ([^\s]+) .* after ([0-9\.]+) ms/)) @@ -5293,7 +7146,6 @@ sub parse_query $cur_lock_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_lock_info{$t_pid}{dbclient} = $prefix_vars{'t_client'}; $cur_lock_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; - $cur_lock_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; } return; } @@ -5306,7 +7158,6 @@ sub parse_query $cur_lock_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_lock_info{$t_pid}{dbclient} = $prefix_vars{'t_client'}; $cur_lock_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; - $cur_lock_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; return; } @@ -5321,6 +7172,8 @@ sub parse_query # Store current temporary file information that will be used later # when we will parse the query responsible of the tempfile $cur_temp_info{$t_pid}{size} = $1; + $overall_stat{'peak'}{$cur_last_log_timestamp}{tempfile_size} += $1; + $overall_stat{'peak'}{$cur_last_log_timestamp}{tempfile_count}++; if ($format eq 'csv') { $cur_temp_info{$t_pid}{query} = $prefix_vars{'t_statement'}; $cur_temp_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; @@ -5328,20 +7181,18 @@ sub parse_query $cur_temp_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_temp_info{$t_pid}{dbclient} = $prefix_vars{'t_client'}; $cur_temp_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; - $cur_temp_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; } return; } # Stores query related to last created temporary file - if (($prefix_vars{'t_loglevel'} eq 'STATEMENT') && exists $cur_temp_info{$t_pid}) { + if (($prefix_vars{'t_loglevel'} eq 'STATEMENT') && $cur_temp_info{$t_pid}{size}) { $cur_temp_info{$t_pid}{query} = $prefix_vars{'t_query'}; $cur_temp_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; $cur_temp_info{$t_pid}{dbname} = $prefix_vars{'t_dbname'}; $cur_temp_info{$t_pid}{dbuser} = $prefix_vars{'t_dbuser'}; $cur_temp_info{$t_pid}{dbclient} = $prefix_vars{'t_client'}; $cur_temp_info{$t_pid}{dbappname} = $prefix_vars{'t_appname'}; - $cur_temp_info{$t_pid}{timestamp} = $prefix_vars{'t_timestamp'}; return; } @@ -5366,6 +7217,8 @@ sub parse_query return; } + $overall_stat{'peak'}{$cur_last_log_timestamp}{connection}++; + $connection_info{count}++; $connection_info{user}{$usr}++; $connection_info{database}{$db}++; @@ -5440,6 +7293,12 @@ sub parse_query $autovacuum_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{autovacuum}{count}++; $cur_info{$t_pid}{vacuum} = $1; + $cur_info{$t_pid}{year} = $prefix_vars{'t_year'}; + $cur_info{$t_pid}{month} = $prefix_vars{'t_month'}; + $cur_info{$t_pid}{day} = $prefix_vars{'t_day'}; + $cur_info{$t_pid}{hour} = $prefix_vars{'t_hour'}; + $cur_info{$t_pid}{min} = $prefix_vars{'t_min'}; + $cur_info{$t_pid}{sec} = $prefix_vars{'t_sec'}; return; } @@ -5452,31 +7311,41 @@ sub parse_query ) { return if ($disable_autovacuum); + my $table = $1; $autoanalyze_info{count}++; - $autoanalyze_info{tables}{$1}{analyzes} += 1; + $autoanalyze_info{tables}{$table}{analyzes} += 1; $autoanalyze_info{chronos}{$date_part}{$prefix_vars{'t_hour'}}{count}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{autoanalyze}{count}++; + if ($prefix_vars{'t_query'} =~ m#system usage: CPU .* sec elapsed (.*) sec#) { + if ($1 > 
$autoanalyze_info{peak}{system_usage}{elapsed}) { + $autoanalyze_info{peak}{system_usage}{elapsed} = $1; + $autoanalyze_info{peak}{system_usage}{table} = $table; + $autoanalyze_info{peak}{system_usage}{date} = $cur_last_log_timestamp; + } + } } - # Store checkpoint information + # Store checkpoint or restartpoint information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ -/checkpoint complete: wrote (\d+) buffers \(([^\)]+)\); (\d+) transaction log file\(s\) added, (\d+) removed, (\d+) recycled; write=([0-9\.]+) s, sync=([0-9\.]+) s, total=([0-9\.]+) s/ +/point complete: wrote (\d+) buffers \(([^\)]+)\); (\d+) transaction log file\(s\) added, (\d+) removed, (\d+) recycled; write=([0-9\.]+) s, sync=([0-9\.]+) s, total=([0-9\.]+) s/ ) ) { + # Example: LOG: checkpoint complete: wrote 175 buffers (5.7%); 0 transaction log file(s) added, 1 removed, 2 recycled; write=17.437 s, sync=0.722 s, total=18.259 s; sync files=2, longest=0.708 s, average=0.361 s return if ($disable_checkpoint); + $checkpoint_info{wbuffer} += $1; #$checkpoint_info{percent_wbuffer} += $2; $checkpoint_info{file_added} += $3; $checkpoint_info{file_removed} += $4; $checkpoint_info{file_recycled} += $5; + $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{walfile_usage} += ($3 + $5); $checkpoint_info{write} += $6; $checkpoint_info{sync} += $7; $checkpoint_info{total} += $8; - $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{wbuffer} += $1; #$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{percent_wbuffer} += $2; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{file_added} += $3; @@ -5485,6 +7354,19 @@ sub parse_query $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{write} += $6; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync} += $7; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{total} += $8; + + $overall_checkpoint{'peak'}{$cur_last_log_timestamp}{checkpoint_wbuffer} += $1; + if ($6 > $overall_checkpoint{checkpoint_write}) { + $overall_checkpoint{checkpoint_write} = $6; + $overall_checkpoint{checkpoint_sync} = $7; + } + + if ($prefix_vars{'t_query'} =~ /sync files=(\d+), longest=([0-9\.]+) s, average=([0-9\.]+) s/) { + $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_files} += $1; + $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_longest} = $2 + if ($2 > $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_longest}); + $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync_avg} += $3; + } return; } @@ -5497,10 +7379,11 @@ sub parse_query $checkpoint_info{warning_seconds} += $1; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{warning}++; $per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{warning_seconds} += $1; + $overall_checkpoint{checkpoint_warning}++; return; } - # Store restartpoint information + # Store old restartpoint information if ( ($prefix_vars{'t_loglevel'} eq 'LOG') && ($prefix_vars{'t_query'} =~ @@ -5508,20 +7391,27 @@ sub parse_query ) ) { + # Example: LOG: restartpoint complete: wrote 1568 buffers (0.3%); write=146.237 s, sync=0.251 s, total=146.489 s return if ($disable_checkpoint); - 
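The unified "point complete" regexp in the hunk above can be checked against
the sample line quoted in the patch comment; the capture order is what drives
the counters. A standalone sketch:

    #!/usr/bin/perl
    use strict;
    use warnings;

    my $line = 'checkpoint complete: wrote 175 buffers (5.7%); 0 transaction log file(s) added, '
             . '1 removed, 2 recycled; write=17.437 s, sync=0.722 s, total=18.259 s';
    if ($line =~ /point complete: wrote (\d+) buffers \(([^\)]+)\); (\d+) transaction log file\(s\) added, (\d+) removed, (\d+) recycled; write=([0-9\.]+) s, sync=([0-9\.]+) s, total=([0-9\.]+) s/) {
        # Prints: wbuffer=175 added=0 removed=1 recycled=2 write=17.437 sync=0.722 total=18.259
        print "wbuffer=$1 added=$3 removed=$4 recycled=$5 write=$6 sync=$7 total=$8\n";
    }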
@@ -5497,10 +7379,11 @@ sub parse_query
 		$checkpoint_info{warning_seconds} += $1;
 		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{warning}++;
 		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{warning_seconds} += $1;
+		$overall_checkpoint{checkpoint_warning}++;
 		return;
 	}
 
@@ -5508,20 +7391,27 @@ sub parse_query
-	# Store restartpoint information
+	# Store old restartpoint information
 	if (
 		($prefix_vars{'t_loglevel'} eq 'LOG')
 		&& ($prefix_vars{'t_query'} =~
 /restartpoint complete: wrote (\d+) buffers \(([^\)]+)\); write=([0-9\.]+) s, sync=([0-9\.]+) s, total=([0-9\.]+) s/
 		)
 	  )
 	{
+		# Example: LOG: restartpoint complete: wrote 1568 buffers (0.3%); write=146.237 s, sync=0.251 s, total=146.489 s
 		return if ($disable_checkpoint);
-		$restartpoint_info{wbuffer} += $1;
-
-		#$restartpoint_info{percent_wbuffer} += $2;
-		$restartpoint_info{write} += $3;
-		$restartpoint_info{sync} += $4;
-		$restartpoint_info{total} += $5;
+		$checkpoint_info{wbuffer} += $1;
+
+		#$checkpoint_info{percent_wbuffer} += $2;
+		$checkpoint_info{write} += $3;
+		$checkpoint_info{sync} += $4;
+		$checkpoint_info{total} += $5;
+		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{wbuffer} += $1;
 
-		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{restartpoint}{wbuffer} += $1;
-
-		#$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{restartpoint}{percent_wbuffer} += $2;
-		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{restartpoint}{write} += $3;
-		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{restartpoint}{sync} += $4;
-		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{restartpoint}{total} += $5;
+		#$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{percent_wbuffer} += $2;
+		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{write} += $3;
+		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{sync} += $4;
+		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{checkpoint}{total} += $5;
+
+		$overall_checkpoint{'peak'}{$cur_last_log_timestamp}{checkpoint_wbuffer} += $1;
+		if ($3 > $overall_checkpoint{checkpoint_write}) {
+			$overall_checkpoint{checkpoint_write} = $3;
+			$overall_checkpoint{checkpoint_sync} = $4;
+		}
 
 		return;
 	}
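The next hunk folds PostgreSQL's "parse" and "execute from fetch" log labels
into the two buckets the prepared-queries report uses. A standalone sketch of
just that normalization, with a made-up label list:

    #!/usr/bin/perl
    use strict;
    use warnings;

    foreach my $t_action ('prepare', 'parse', 'bind', 'execute', 'execute from fetch') {
        my $normalized = $t_action;
        $normalized =~ s/ from fetch//;                      # "execute from fetch" -> "execute"
        $normalized = 'prepare' if ($normalized eq 'parse'); # protocol parse counts as prepare
        print "$t_action -> $normalized\n";
    }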
@@ -5584,20 +7474,36 @@ sub parse_query
 	if ($prefix_vars{'t_query'} =~ s/duration: ([0-9\.]+) ms (query|statement): //is) {
 		$prefix_vars{'t_duration'} = $1;
 		$t_action = $2;
+		if (($t_action eq 'statement') && $prefix_vars{'t_query'} =~ /^(PREPARE|EXECUTE)\b/i) {
+			$overall_stat{lc($1)}++;
+			$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{lc($1)}++;
+		}
 
 	# Log line with duration and statement from prepared queries
 	} elsif ($prefix_vars{'t_query'} =~ s/duration: ([0-9\.]+) ms (prepare|parse|bind|execute from fetch|execute)\s+[^:]+:\s//is) {
 		$prefix_vars{'t_duration'} = $1;
-		$t_action = $2;
+		$t_action = $2;
+		$t_action =~ s/ from fetch//;
+		$t_action = 'prepare' if ($t_action eq 'parse');
+		$overall_stat{$t_action}++;
+		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{$t_action}++;
 
 		# Skipping parse and bind logs
 		return if ($t_action !~ /query|statement|execute/);
 
 	# Log line without duration at all
 	} elsif ($prefix_vars{'t_query'} =~ s/(query|statement): //is) {
 		$t_action = $1;
+		if (($t_action eq 'statement') && $prefix_vars{'t_query'} =~ /^(PREPARE|EXECUTE)\b/i) {
+			$overall_stat{lc($1)}++;
+			$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{lc($1)}++;
+		}
 
 	# Log line without duration at all from prepared queries
 	} elsif ($prefix_vars{'t_query'} =~ s/(prepare|parse|bind|execute from fetch|execute)\s+[^:]+:\s//is) {
 		$t_action = $1;
+		$t_action =~ s/ from fetch//;
+		$t_action = 'prepare' if ($t_action eq 'parse');
+		$overall_stat{$t_action}++;
+		$per_minute_info{$date_part}{$prefix_vars{'t_hour'}}{$prefix_vars{'t_min'}}{$t_action}++;
 
 		# Skipping parse and bind logs
 		return if ($t_action !~ /query|statement|execute/);
 
 	# Log line that would not be parse
@@ -5701,7 +7607,7 @@ sub store_tsung_session
 
 sub store_queries
 {
-	my $t_pid = shift;
+	my $t_pid = shift;
 
 	# Remove comments if required
 	if ($remove_comment) {
@@ -5712,6 +7618,7 @@ sub store_queries
 	&store_temporary_and_lock_infos($t_pid);
 
 	return if (!exists $cur_info{$t_pid});
+	return if (!$cur_info{$t_pid}{year});
 
 	# Cleanup and normalize the current query
 	$cur_info{$t_pid}{query} =~ s/^[\t\s\r\n]+//s;
@@ -5784,7 +7691,6 @@ sub store_queries
 
 		# Stores total and normalized error count
 		$overall_stat{'errors_number'}++;
-		$overall_stat{'unique_normalized_errors'}{"$normalized_error"}++;
 		$error_info{$normalized_error}{count}++;
 
 		# Stores normalized error count per time
@@ -5813,7 +7719,7 @@ sub store_queries
 	if (!$overall_stat{'last_query_ts'} || ($overall_stat{'last_query_ts'} lt $cur_last_log_timestamp)) {
 		$overall_stat{'last_query_ts'} = $cur_last_log_timestamp;
 	}
-	$overall_stat{'query_peak'}{$cur_last_log_timestamp}++;
+	$overall_stat{'peak'}{$cur_last_log_timestamp}{query}++;
 
 	if ($graph) {
 		$per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{query}{count}++;
@@ -5831,11 +7737,23 @@ sub store_queries
 	# Counter per database and application name
 	if ($cur_info{$t_pid}{dbname}) {
 		$database_info{$cur_info{$t_pid}{dbname}}{count}++;
+	} else {
+		$database_info{'unknown'}{count}++;
 	}
 	if ($cur_info{$t_pid}{dbappname}) {
 		$application_info{$cur_info{$t_pid}{dbappname}}{count}++;
 	} else {
-		$application_info{others}{count}++;
+		$application_info{'unknown'}{count}++;
+	}
+	if ($cur_info{$t_pid}{dbuser}) {
+		$user_info{$cur_info{$t_pid}{dbuser}}{count}++;
+	} else {
+		$user_info{'unknown'}{count}++;
+	}
+	if ($cur_info{$t_pid}{dbclient}) {
+		$host_info{$cur_info{$t_pid}{dbclient}}{count}++;
+	} else {
+		$host_info{'unknown'}{count}++;
 	}
 
 	if ($cur_info{$t_pid}{query}) {
@@ -5849,15 +7767,32 @@ sub store_queries
 			if ($normalized =~ $act) {
 				my $action = uc($1);
 				$overall_stat{$action}++;
+				if ($action eq 'SELECT') {
+					$overall_stat{'peak'}{$cur_last_log_timestamp}{select}++;
+				} else {
+					$overall_stat{'peak'}{$cur_last_log_timestamp}{write}++;
+				}
 				$per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{$action}{count}++;
 				$per_minute_info{"$cur_day_str"}{"$cur_hour_str"}{$cur_info{$t_pid}{min}}{$action}{duration} += $cur_info{$t_pid}{duration} if ($cur_info{$t_pid}{duration});
 				if ($cur_info{$t_pid}{dbname}) {
 					$database_info{$cur_info{$t_pid}{dbname}}{$action}++;
+				} else {
+					$database_info{'unknown'}{$action}++;
 				}
 				if ($cur_info{$t_pid}{dbappname}) {
 					$application_info{$cur_info{$t_pid}{dbappname}}{$action}++;
 				} else {
-					$application_info{others}{$action}++;
+					$application_info{'unknown'}{$action}++;
+				}
+				if ($cur_info{$t_pid}{dbuser}) {
+					$user_info{$cur_info{$t_pid}{dbuser}}{$action}++;
+				} else {
+					$user_info{'unknown'}{$action}++;
+				}
+				if ($cur_info{$t_pid}{dbclient}) {
+					$host_info{$cur_info{$t_pid}{dbclient}}{$action}++;
+				} else {
+					$host_info{'unknown'}{$action}++;
 				}
 				last;
 			}
@@ -5901,7 +7836,7 @@ sub store_temporary_and_lock_infos
 	return if (!$t_pid);
 
 	# Store normalized query temp file size if required
-	if (exists $cur_temp_info{$t_pid} && ($cur_temp_info{$t_pid}{query} ne '') ) {
+	if (exists $cur_temp_info{$t_pid} && ($cur_temp_info{$t_pid}{query} ne '') && $cur_temp_info{$t_pid}{size}) {
 
 		# Add a semi-colon at end of the query
 		$cur_temp_info{$t_pid}{query} .= ';' if (substr($cur_temp_info{$t_pid}{query}, -1, 1) ne ';');
@@ -5921,11 +7856,10 @@ sub store_temporary_and_lock_infos
 		}
 		&set_top_tempfile_info($cur_temp_info{$t_pid}{query}, $cur_temp_info{$t_pid}{size}, $cur_temp_info{$t_pid}{timestamp}, $cur_temp_info{$t_pid}{dbname}, $cur_temp_info{$t_pid}{dbuser}, $cur_temp_info{$t_pid}{dbclient}, $cur_temp_info{$t_pid}{dbappname});
 		delete $cur_temp_info{$t_pid};
-		#delete $cur_info{$t_pid};
 	}
 
 	# Store normalized query that waited the most if required
-	if (exists $cur_lock_info{$t_pid} && ($cur_lock_info{$t_pid}{query} ne '')) {
+	if (exists $cur_lock_info{$t_pid} && ($cur_lock_info{$t_pid}{query} ne '') && $cur_lock_info{$t_pid}{wait}) {
 
 		# Add a semi-colon at end of the query
 		$cur_lock_info{$t_pid}{query} .= ';' if (substr($cur_lock_info{$t_pid}{query}, -1, 1) ne ';');
@@ -5944,7 +7878,6 @@ sub store_temporary_and_lock_infos
 		}
 		&set_top_locked_info($cur_lock_info{$t_pid}{query}, $cur_lock_info{$t_pid}{wait}, $cur_lock_info{$t_pid}{timestamp}, $cur_lock_info{$t_pid}{dbname}, $cur_lock_info{$t_pid}{dbuser}, $cur_lock_info{$t_pid}{dbclient}, $cur_lock_info{$t_pid}{dbappname});
 		delete $cur_lock_info{$t_pid};
-		#delete $cur_info{$t_pid};
 	}
 }
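The store_queries hunks above attribute every entry to a database, user, and
client host, falling back to an 'unknown' bucket when the log_line_prefix did
not capture one. A toy record shows the pattern:

    #!/usr/bin/perl
    use strict;
    use warnings;

    my %user_info;
    my %cur = ( dbuser => '' );   # no user captured for this log entry
    if ($cur{dbuser}) {
        $user_info{ $cur{dbuser} }{count}++;
    } else {
        $user_info{'unknown'}{count}++;
    }
    print "unknown count = $user_info{unknown}{count}\n";   # 1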
@@ -6106,19 +8039,24 @@ sub flotr2_graph
 {
 	my ($buttonid, $divid, $data1, $data2, $data3, $title, $ytitle, $legend1, $legend2, $legend3, $ytitle2, $data4, $legend4) = @_;
 
+	if (!$data1) {
+		return qq{
+<div class="nodata">NO DATASET</div>
+};
+	}
 	$data1 = "var d1 = [$data1];" if ($data1);
 	$data2 = "var d2 = [$data2];" if ($data2);
 	$data3 = "var d3 = [$data3];" if ($data3);
 	$data4 = "var d4 = [$data4];" if ($data4);
 
-	$legend1 = "{ data: d1, label: \"$legend1\", mouse:{track:true}}," if ($legend1);
-	$legend2 = "{ data: d2, label: \"$legend2\", mouse:{track:true}}," if ($legend2);
-	$legend3 = "{ data: d3, label: \"$legend3\", mouse:{track:true}}," if ($legend3);
-	$legend4 = "{ data: d4, label: \"$legend4\",yaxis: 2}," if ($legend4);
+	$legend1 = "{ data: d1, label: \"$legend1\", color: \"#6e9dc9\", mouse:{track:true}}," if ($legend1);
+	$legend2 = "{ data: d2, label: \"$legend2\", color: \"#f4ab3a\", mouse:{track:true}}," if ($legend2);
+	$legend3 = "{ data: d3, label: \"$legend3\", color: \"#ac7fa8\", mouse:{track:true}}," if ($legend3);
+	$legend4 = "{ data: d4, label: \"$legend4\", color: \"#8dbd0f\",yaxis: 2}," if ($legend4);
 
 	my $yaxis2 = '';
 	if ($ytitle2) {
-		$yaxis2 = "y2axis: { mode: \"normal\", title: \"$ytitle2\", min: 0, color: \"#4DA74D\" },";
+		$yaxis2 = "y2axis: { mode: \"normal\", title: \"$ytitle2\", min: 0, color: \"#8dbd0f\" },";
 	}
 
 	my $type = '';
@@ -6128,15 +8066,15 @@ sub flotr2_graph
 		$type = 'duration';
 	}
 
-	print $fh <<EOF;
+	return <<EOF;
+<div class="chart" id="$divid"></div>
+<script type="text/javascript">
+$data1
+$data2
+$data3
+$data4
+EOF
-- 
2.40.0