From f04d4ac919b9ae9b57e977523e4b40979aa8b951 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 25 Apr 2018 14:00:19 -0400 Subject: [PATCH] Reindent Perl files with perltidy version 20170521. Discussion: https://postgr.es/m/CABUevEzK3cNiHZQ18f5tK0guoT+cN_jWeVzhYYxY=r+1Q3SmoA@mail.gmail.com --- doc/src/sgml/mk_feature_tables.pl | 4 +- src/backend/catalog/Catalog.pm | 298 ++-- src/backend/catalog/genbki.pl | 83 +- src/backend/utils/Gen_fmgrtab.pl | 14 +- src/bin/initdb/t/001_initdb.pl | 10 +- .../t/010_pg_archivecleanup.pl | 77 +- src/bin/pg_basebackup/t/010_pg_basebackup.pl | 138 +- src/bin/pg_basebackup/t/020_pg_receivewal.pl | 8 +- .../pg_controldata/t/001_pg_controldata.pl | 17 +- src/bin/pg_ctl/t/001_start_stop.pl | 6 +- src/bin/pg_dump/t/002_pg_dump.pl | 1502 +++++++---------- src/bin/pg_resetwal/t/001_basic.pl | 6 +- src/bin/pg_resetwal/t/002_corrupted.pl | 28 +- src/bin/pg_rewind/RewindTest.pm | 26 +- src/bin/pg_rewind/t/001_basic.pl | 5 +- src/bin/pg_rewind/t/002_databases.pl | 5 +- src/bin/pgbench/t/001_pgbench_with_server.pl | 132 +- src/bin/pgbench/t/002_pgbench_no_server.pl | 67 +- src/bin/scripts/t/080_pg_isready.pl | 3 +- src/bin/scripts/t/100_vacuumdb.pl | 13 +- src/include/catalog/duplicate_oids | 10 +- src/include/catalog/reformat_dat_file.pl | 15 +- src/test/kerberos/t/001_auth.pl | 78 +- src/test/ldap/t/001_auth.pl | 142 +- src/test/modules/brin/t/01_workitems.pl | 12 +- src/test/modules/test_pg_dump/t/001_base.pl | 103 +- src/test/perl/PostgresNode.pm | 16 +- src/test/perl/RecursiveCopy.pm | 2 +- src/test/perl/TestLib.pm | 72 +- src/test/recovery/t/006_logical_decoding.pl | 3 +- src/test/recovery/t/009_twophase.pl | 4 +- src/test/recovery/t/013_crash_restart.pl | 91 +- src/test/recovery/t/014_unlogged_reinit.pl | 11 +- src/test/ssl/ServerSetup.pm | 14 +- src/test/ssl/t/001_ssltests.pl | 314 ++-- src/test/ssl/t/002_scram.pl | 35 +- src/test/subscription/t/001_rep_changes.pl | 12 +- src/test/subscription/t/005_encoding.pl | 2 +- src/test/subscription/t/006_rewrite.pl | 20 +- src/test/subscription/t/007_ddl.pl | 3 +- src/test/subscription/t/008_diff_schema.pl | 34 +- src/test/subscription/t/009_matviews.pl | 13 +- src/test/subscription/t/010_truncate.pl | 67 +- src/tools/git_changelog | 6 +- src/tools/msvc/Install.pm | 10 +- src/tools/msvc/Mkvcbuild.pm | 25 +- src/tools/msvc/Project.pm | 2 +- src/tools/msvc/Solution.pm | 30 +- src/tools/msvc/VSObjectFactory.pm | 15 +- src/tools/msvc/gendef.pl | 4 +- src/tools/msvc/vcregress.pl | 9 +- src/tools/pginclude/pgcheckdefines | 2 +- src/tools/pgindent/pgindent | 9 +- 53 files changed, 1802 insertions(+), 1825 deletions(-) diff --git a/doc/src/sgml/mk_feature_tables.pl b/doc/src/sgml/mk_feature_tables.pl index 9b111b8b40..476e50e66d 100644 --- a/doc/src/sgml/mk_feature_tables.pl +++ b/doc/src/sgml/mk_feature_tables.pl @@ -38,8 +38,8 @@ while (<$feat>) $is_supported eq $yesno || next; - $feature_name =~ s//>/g; + $feature_name =~ s//>/g; $subfeature_name =~ s//>/g; diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm index 6305a2b362..ce425562f5 100644 --- a/src/backend/catalog/Catalog.pm +++ b/src/backend/catalog/Catalog.pm @@ -33,185 +33,186 @@ sub ParseHeader 'TransactionId' => 'xid', 'XLogRecPtr' => 'pg_lsn'); - my %catalog; - my $declaring_attributes = 0; - my $is_varlen = 0; - my $is_client_code = 0; + my %catalog; + my $declaring_attributes = 0; + my $is_varlen = 0; + my $is_client_code = 0; - $catalog{columns} = []; - $catalog{toasting} = []; - $catalog{indexing} = []; - $catalog{client_code} = []; + 
$catalog{columns} = []; + $catalog{toasting} = []; + $catalog{indexing} = []; + $catalog{client_code} = []; - open(my $ifh, '<', $input_file) || die "$input_file: $!"; + open(my $ifh, '<', $input_file) || die "$input_file: $!"; - # Scan the input file. - while (<$ifh>) - { + # Scan the input file. + while (<$ifh>) + { - # Set appropriate flag when we're in certain code sections. - if (/^#/) + # Set appropriate flag when we're in certain code sections. + if (/^#/) + { + $is_varlen = 1 if /^#ifdef\s+CATALOG_VARLEN/; + if (/^#ifdef\s+EXPOSE_TO_CLIENT_CODE/) { - $is_varlen = 1 if /^#ifdef\s+CATALOG_VARLEN/; - if (/^#ifdef\s+EXPOSE_TO_CLIENT_CODE/) - { - $is_client_code = 1; - next; - } - next if !$is_client_code; + $is_client_code = 1; + next; } + next if !$is_client_code; + } - if (!$is_client_code) + if (!$is_client_code) + { + # Strip C-style comments. + s;/\*(.|\n)*\*/;;g; + if (m;/\*;) { - # Strip C-style comments. - s;/\*(.|\n)*\*/;;g; - if (m;/\*;) - { - - # handle multi-line comments properly. - my $next_line = <$ifh>; - die "$input_file: ends within C-style comment\n" - if !defined $next_line; - $_ .= $next_line; - redo; - } - # Strip useless whitespace and trailing semicolons. - chomp; - s/^\s+//; - s/;\s*$//; - s/\s+/ /g; + # handle multi-line comments properly. + my $next_line = <$ifh>; + die "$input_file: ends within C-style comment\n" + if !defined $next_line; + $_ .= $next_line; + redo; } - # Push the data into the appropriate data structure. - if (/^DECLARE_TOAST\(\s*(\w+),\s*(\d+),\s*(\d+)\)/) + # Strip useless whitespace and trailing semicolons. + chomp; + s/^\s+//; + s/;\s*$//; + s/\s+/ /g; + } + + # Push the data into the appropriate data structure. + if (/^DECLARE_TOAST\(\s*(\w+),\s*(\d+),\s*(\d+)\)/) + { + my ($toast_name, $toast_oid, $index_oid) = ($1, $2, $3); + push @{ $catalog{toasting} }, + "declare toast $toast_oid $index_oid on $toast_name\n"; + } + elsif (/^DECLARE_(UNIQUE_)?INDEX\(\s*(\w+),\s*(\d+),\s*(.+)\)/) + { + my ($is_unique, $index_name, $index_oid, $using) = + ($1, $2, $3, $4); + push @{ $catalog{indexing} }, + sprintf( + "declare %sindex %s %s %s\n", + $is_unique ? 'unique ' : '', + $index_name, $index_oid, $using); + } + elsif (/^BUILD_INDICES/) + { + push @{ $catalog{indexing} }, "build indices\n"; + } + elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/) + { + $catalog{catname} = $1; + $catalog{relation_oid} = $2; + $catalog{relation_oid_macro} = $3; + + $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : ''; + $catalog{shared_relation} = + /BKI_SHARED_RELATION/ ? ' shared_relation' : ''; + $catalog{without_oids} = + /BKI_WITHOUT_OIDS/ ? ' without_oids' : ''; + if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/) { - my ($toast_name, $toast_oid, $index_oid) = ($1, $2, $3); - push @{ $catalog{toasting} }, - "declare toast $toast_oid $index_oid on $toast_name\n"; + $catalog{rowtype_oid} = $1; + $catalog{rowtype_oid_clause} = " rowtype_oid $1"; + $catalog{rowtype_oid_macro} = $2; } - elsif (/^DECLARE_(UNIQUE_)?INDEX\(\s*(\w+),\s*(\d+),\s*(.+)\)/) + else { - my ($is_unique, $index_name, $index_oid, $using) = - ($1, $2, $3, $4); - push @{ $catalog{indexing} }, - sprintf( - "declare %sindex %s %s %s\n", - $is_unique ? 'unique ' : '', - $index_name, $index_oid, $using); + $catalog{rowtype_oid} = ''; + $catalog{rowtype_oid_clause} = ''; + $catalog{rowtype_oid_macro} = ''; } - elsif (/^BUILD_INDICES/) + $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 
1 : 0; + $declaring_attributes = 1; + } + elsif ($is_client_code) + { + if (/^#endif/) { - push @{ $catalog{indexing} }, "build indices\n"; + $is_client_code = 0; } - elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/) + else { - $catalog{catname} = $1; - $catalog{relation_oid} = $2; - $catalog{relation_oid_macro} = $3; - - $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : ''; - $catalog{shared_relation} = - /BKI_SHARED_RELATION/ ? ' shared_relation' : ''; - $catalog{without_oids} = - /BKI_WITHOUT_OIDS/ ? ' without_oids' : ''; - if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/) - { - $catalog{rowtype_oid} = $1; - $catalog{rowtype_oid_clause} = " rowtype_oid $1"; - $catalog{rowtype_oid_macro} = $2; - } - else - { - $catalog{rowtype_oid} = ''; - $catalog{rowtype_oid_clause} = ''; - $catalog{rowtype_oid_macro} = ''; - } - $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0; - $declaring_attributes = 1; + push @{ $catalog{client_code} }, $_; } - elsif ($is_client_code) + } + elsif ($declaring_attributes) + { + next if (/^{|^$/); + if (/^}/) { - if (/^#endif/) - { - $is_client_code = 0; - } - else - { - push @{ $catalog{client_code} }, $_; - } + $declaring_attributes = 0; } - elsif ($declaring_attributes) + else { - next if (/^{|^$/); - if (/^}/) + my %column; + my @attopts = split /\s+/, $_; + my $atttype = shift @attopts; + my $attname = shift @attopts; + die "parse error ($input_file)" + unless ($attname and $atttype); + + if (exists $RENAME_ATTTYPE{$atttype}) { - $declaring_attributes = 0; + $atttype = $RENAME_ATTTYPE{$atttype}; } - else + + # If the C name ends with '[]' or '[digits]', we have + # an array type, so we discard that from the name and + # prepend '_' to the type. + if ($attname =~ /(\w+)\[\d*\]/) { - my %column; - my @attopts = split /\s+/, $_; - my $atttype = shift @attopts; - my $attname = shift @attopts; - die "parse error ($input_file)" - unless ($attname and $atttype); - - if (exists $RENAME_ATTTYPE{$atttype}) + $attname = $1; + $atttype = '_' . $atttype; + } + + $column{type} = $atttype; + $column{name} = $attname; + $column{is_varlen} = 1 if $is_varlen; + + foreach my $attopt (@attopts) + { + if ($attopt eq 'BKI_FORCE_NULL') { - $atttype = $RENAME_ATTTYPE{$atttype}; + $column{forcenull} = 1; } - - # If the C name ends with '[]' or '[digits]', we have - # an array type, so we discard that from the name and - # prepend '_' to the type. - if ($attname =~ /(\w+)\[\d*\]/) + elsif ($attopt eq 'BKI_FORCE_NOT_NULL') { - $attname = $1; - $atttype = '_' . $atttype; + $column{forcenotnull} = 1; } - $column{type} = $atttype; - $column{name} = $attname; - $column{is_varlen} = 1 if $is_varlen; + # We use quotes for values like \0 and \054, to + # make sure all compilers and syntax highlighters + # can recognize them properly. + elsif ($attopt =~ /BKI_DEFAULT\(['"]?([^'"]+)['"]?\)/) + { + $column{default} = $1; + } + elsif ($attopt =~ /BKI_LOOKUP\((\w+)\)/) + { + $column{lookup} = $1; + } + else + { + die + "unknown column option $attopt on column $attname"; + } - foreach my $attopt (@attopts) + if ($column{forcenull} and $column{forcenotnull}) { - if ($attopt eq 'BKI_FORCE_NULL') - { - $column{forcenull} = 1; - } - elsif ($attopt eq 'BKI_FORCE_NOT_NULL') - { - $column{forcenotnull} = 1; - } - # We use quotes for values like \0 and \054, to - # make sure all compilers and syntax highlighters - # can recognize them properly. 
- elsif ($attopt =~ /BKI_DEFAULT\(['"]?([^'"]+)['"]?\)/) - { - $column{default} = $1; - } - elsif ($attopt =~ /BKI_LOOKUP\((\w+)\)/) - { - $column{lookup} = $1; - } - else - { - die -"unknown column option $attopt on column $attname"; - } - - if ($column{forcenull} and $column{forcenotnull}) - { - die "$attname is forced both null and not null"; - } + die "$attname is forced both null and not null"; } - push @{ $catalog{columns} }, \%column; } + push @{ $catalog{columns} }, \%column; } } - close $ifh; + } + close $ifh; return \%catalog; } @@ -228,7 +229,7 @@ sub ParseData $input_file =~ /(\w+)\.dat$/ or die "Input file $input_file needs to be a .dat file.\n"; my $catname = $1; - my $data = []; + my $data = []; # Scan the input file. while (<$ifd>) @@ -311,8 +312,9 @@ sub AddDefaultValues { $row->{$attname} = $column->{default}; } - elsif ($catname eq 'pg_proc' && $attname eq 'pronargs' && - defined($row->{proargtypes})) + elsif ($catname eq 'pg_proc' + && $attname eq 'pronargs' + && defined($row->{proargtypes})) { # pg_proc.pronargs can be derived from proargtypes. my @proargtypes = split /\s+/, $row->{proargtypes}; @@ -328,7 +330,7 @@ sub AddDefaultValues if (@missing_fields) { die sprintf "missing values for field(s) %s in %s.dat line %s\n", - join(', ', @missing_fields), $catname, $row->{line_number}; + join(', ', @missing_fields), $catname, $row->{line_number}; } } @@ -379,7 +381,7 @@ sub FindDefinedSymbol sub FindDefinedSymbolFromData { my ($data, $symbol) = @_; - foreach my $row (@{ $data }) + foreach my $row (@{$data}) { if ($row->{oid_symbol} eq $symbol) { diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index 9cf26263f8..5d4fa5c154 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -116,10 +116,12 @@ foreach my $header (@input_files) # within a given Postgres release, such as fixed OIDs. Do not substitute # anything that could depend on platform or configuration. (The right place # to handle those sorts of things is in initdb.c's bootstrap_template1().) -my $BOOTSTRAP_SUPERUSERID = Catalog::FindDefinedSymbolFromData( - $catalog_data{pg_authid}, 'BOOTSTRAP_SUPERUSERID'); -my $PG_CATALOG_NAMESPACE = Catalog::FindDefinedSymbolFromData( - $catalog_data{pg_namespace}, 'PG_CATALOG_NAMESPACE'); +my $BOOTSTRAP_SUPERUSERID = + Catalog::FindDefinedSymbolFromData($catalog_data{pg_authid}, + 'BOOTSTRAP_SUPERUSERID'); +my $PG_CATALOG_NAMESPACE = + Catalog::FindDefinedSymbolFromData($catalog_data{pg_namespace}, + 'PG_CATALOG_NAMESPACE'); # Build lookup tables for OID macro substitutions and for pg_attribute @@ -138,8 +140,7 @@ foreach my $row (@{ $catalog_data{pg_opclass} }) { # There is no unique name, so we need to combine access method # and opclass name. - my $key = sprintf "%s/%s", - $row->{opcmethod}, $row->{opcname}; + my $key = sprintf "%s/%s", $row->{opcmethod}, $row->{opcname}; $opcoids{$key} = $row->{oid}; } @@ -160,8 +161,7 @@ foreach my $row (@{ $catalog_data{pg_opfamily} }) { # There is no unique name, so we need to combine access method # and opfamily name. - my $key = sprintf "%s/%s", - $row->{opfmethod}, $row->{opfname}; + my $key = sprintf "%s/%s", $row->{opfmethod}, $row->{opfname}; $opfoids{$key} = $row->{oid}; } @@ -179,6 +179,7 @@ foreach my $row (@{ $catalog_data{pg_proc} }) { $procoids{$prokey} = $row->{oid}; } + # Also generate an entry using proname(proargtypes). 
This is not quite # identical to regprocedure lookup because we don't worry much about # special SQL names for types etc; we just use the names in the source @@ -201,7 +202,7 @@ my %types; foreach my $row (@{ $catalog_data{pg_type} }) { $typeoids{ $row->{typname} } = $row->{oid}; - $types{ $row->{typname} } = $row; + $types{ $row->{typname} } = $row; } # Map catalog name to OID lookup. @@ -211,8 +212,7 @@ my %lookup_kind = ( pg_operator => \%operoids, pg_opfamily => \%opfoids, pg_proc => \%procoids, - pg_type => \%typeoids -); + pg_type => \%typeoids); # Generate postgres.bki, postgres.description, postgres.shdescription, @@ -345,11 +345,14 @@ EOM # Complain about unrecognized keys; they are presumably misspelled foreach my $key (keys %bki_values) { - next if $key eq "oid" || $key eq "oid_symbol" || $key eq "descr" - || $key eq "line_number"; + next + if $key eq "oid" + || $key eq "oid_symbol" + || $key eq "descr" + || $key eq "line_number"; die sprintf "unrecognized field name \"%s\" in %s.dat line %s\n", - $key, $catname, $bki_values{line_number} - if (!exists($attnames{$key})); + $key, $catname, $bki_values{line_number} + if (!exists($attnames{$key})); } # Perform required substitutions on fields @@ -379,8 +382,8 @@ EOM if ($atttype eq 'oidvector') { @lookupnames = split /\s+/, $bki_values{$attname}; - @lookupoids = lookup_oids($lookup, $catname, - \%bki_values, @lookupnames); + @lookupoids = lookup_oids($lookup, $catname, \%bki_values, + @lookupnames); $bki_values{$attname} = join(' ', @lookupoids); } elsif ($atttype eq '_oid') @@ -389,17 +392,18 @@ EOM { $bki_values{$attname} =~ s/[{}]//g; @lookupnames = split /,/, $bki_values{$attname}; - @lookupoids = lookup_oids($lookup, $catname, - \%bki_values, @lookupnames); - $bki_values{$attname} = - sprintf "{%s}", join(',', @lookupoids); + @lookupoids = + lookup_oids($lookup, $catname, \%bki_values, + @lookupnames); + $bki_values{$attname} = sprintf "{%s}", + join(',', @lookupoids); } } else { $lookupnames[0] = $bki_values{$attname}; - @lookupoids = lookup_oids($lookup, $catname, - \%bki_values, @lookupnames); + @lookupoids = lookup_oids($lookup, $catname, \%bki_values, + @lookupnames); $bki_values{$attname} = $lookupoids[0]; } } @@ -562,7 +566,7 @@ sub gen_pg_attribute morph_row_for_schemapg(\%row, $schema); push @{ $schemapg_entries{$table_name} }, sprintf "{ %s }", - join(', ', grep { defined $_ } @row{@attnames}); + join(', ', grep { defined $_ } @row{@attnames}); } # Generate entries for system attributes. @@ -589,7 +593,7 @@ sub gen_pg_attribute # Omit the oid column if the catalog doesn't have them next if $table->{without_oids} - && $attr->{name} eq 'oid'; + && $attr->{name} eq 'oid'; morph_row_for_pgattr(\%row, $schema, $attr, 1); print_bki_insert(\%row, $schema); @@ -641,11 +645,11 @@ sub morph_row_for_pgattr # compare DefineAttr in bootstrap.c. oidvector and # int2vector are also treated as not-nullable. $row->{attnotnull} = - $type->{typname} eq 'oidvector' ? 't' - : $type->{typname} eq 'int2vector' ? 't' - : $type->{typlen} eq 'NAMEDATALEN' ? 't' - : $type->{typlen} > 0 ? 't' - : 'f'; + $type->{typname} eq 'oidvector' ? 't' + : $type->{typname} eq 'int2vector' ? 't' + : $type->{typlen} eq 'NAMEDATALEN' ? 't' + : $type->{typlen} > 0 ? 't' + : 'f'; } else { @@ -686,7 +690,7 @@ sub print_bki_insert # the "id" pattern in bootscanner.l, currently "[-A-Za-z0-9_]+". 
$bki_value = sprintf(qq'"%s"', $bki_value) if length($bki_value) == 0 - or $bki_value =~ /[^-A-Za-z0-9_]/; + or $bki_value =~ /[^-A-Za-z0-9_]/; push @bki_values, $bki_value; } @@ -725,7 +729,7 @@ sub morph_row_for_schemapg # don't change. elsif ($atttype eq 'bool') { - $row->{$attname} = 'true' if $row->{$attname} eq 't'; + $row->{$attname} = 'true' if $row->{$attname} eq 't'; $row->{$attname} = 'false' if $row->{$attname} eq 'f'; } @@ -756,9 +760,10 @@ sub lookup_oids else { push @lookupoids, $lookupname; - warn sprintf "unresolved OID reference \"%s\" in %s.dat line %s\n", - $lookupname, $catname, $bki_values->{line_number} - if $lookupname ne '-' and $lookupname ne '0'; + warn sprintf + "unresolved OID reference \"%s\" in %s.dat line %s\n", + $lookupname, $catname, $bki_values->{line_number} + if $lookupname ne '-' and $lookupname ne '0'; } } return @lookupoids; @@ -772,10 +777,10 @@ sub form_pg_type_symbol # Skip for rowtypes of bootstrap catalogs, since they have their # own naming convention defined elsewhere. return - if $typename eq 'pg_type' - or $typename eq 'pg_proc' - or $typename eq 'pg_attribute' - or $typename eq 'pg_class'; + if $typename eq 'pg_type' + or $typename eq 'pg_proc' + or $typename eq 'pg_attribute' + or $typename eq 'pg_class'; # Transform like so: # foo_bar -> FOO_BAROID diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl index 4f5af79d0b..3ba1611f18 100644 --- a/src/backend/utils/Gen_fmgrtab.pl +++ b/src/backend/utils/Gen_fmgrtab.pl @@ -68,7 +68,7 @@ foreach my $datfile (@input_files) my $header = "$1.h"; die "There in no header file corresponding to $datfile" - if ! -e $header; + if !-e $header; my $catalog = Catalog::ParseHeader($header); my $catname = $catalog->{catname}; @@ -79,10 +79,12 @@ foreach my $datfile (@input_files) } # Fetch some values for later. 
-my $FirstBootstrapObjectId = Catalog::FindDefinedSymbol( - 'access/transam.h', \@include_path, 'FirstBootstrapObjectId'); -my $INTERNALlanguageId = Catalog::FindDefinedSymbolFromData( - $catalog_data{pg_language}, 'INTERNALlanguageId'); +my $FirstBootstrapObjectId = + Catalog::FindDefinedSymbol('access/transam.h', \@include_path, + 'FirstBootstrapObjectId'); +my $INTERNALlanguageId = + Catalog::FindDefinedSymbolFromData($catalog_data{pg_language}, + 'INTERNALlanguageId'); print "Generating fmgrtab.c, fmgroids.h, and fmgrprotos.h...\n"; @@ -230,7 +232,7 @@ foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr) print $tfh " { $s->{oid}, \"$s->{prosrc}\", $s->{nargs}, $bmap{$s->{strict}}, $bmap{$s->{retset}}, $s->{prosrc} }"; - $fmgr_builtin_oid_index[$s->{oid}] = $fmgr_count++; + $fmgr_builtin_oid_index[ $s->{oid} ] = $fmgr_count++; if ($fmgr_count <= $#fmgr) { diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl index 8609d2ecbc..599460ca88 100644 --- a/src/bin/initdb/t/001_initdb.pl +++ b/src/bin/initdb/t/001_initdb.pl @@ -49,12 +49,13 @@ mkdir $datadir; 'successful creation'); # Permissions on PGDATA should be default - SKIP: + SKIP: { - skip "unix-style permissions not supported on Windows", 1 if ($windows_os); + skip "unix-style permissions not supported on Windows", 1 + if ($windows_os); ok(check_mode_recursive($datadir, 0700, 0600), - "check PGDATA permissions"); + "check PGDATA permissions"); } } command_ok([ 'initdb', '-S', $datadir ], 'sync only'); @@ -63,7 +64,8 @@ command_fails([ 'initdb', $datadir ], 'existing data directory'); # Check group access on PGDATA SKIP: { - skip "unix-style permissions not supported on Windows", 2 if ($windows_os); + skip "unix-style permissions not supported on Windows", 2 + if ($windows_os); # Init a new db with group access my $datadir_group = "$tempdir/data_group"; diff --git a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl index 1d3a1e4fb9..fdedd2faaa 100644 --- a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl +++ b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl @@ -10,11 +10,8 @@ program_options_handling_ok('pg_archivecleanup'); my $tempdir = TestLib::tempdir; my @walfiles = ( - '00000001000000370000000C.gz', - '00000001000000370000000D', - '00000001000000370000000E', - '00000001000000370000000F.partial', -); + '00000001000000370000000C.gz', '00000001000000370000000D', + '00000001000000370000000E', '00000001000000370000000F.partial',); sub create_files { @@ -28,32 +25,41 @@ sub create_files create_files(); -command_fails_like(['pg_archivecleanup'], - qr/must specify archive location/, - 'fails if archive location is not specified'); +command_fails_like( + ['pg_archivecleanup'], + qr/must specify archive location/, + 'fails if archive location is not specified'); -command_fails_like(['pg_archivecleanup', $tempdir], - qr/must specify oldest kept WAL file/, - 'fails if oldest kept WAL file name is not specified'); +command_fails_like( + [ 'pg_archivecleanup', $tempdir ], + qr/must specify oldest kept WAL file/, + 'fails if oldest kept WAL file name is not specified'); -command_fails_like(['pg_archivecleanup', 'notexist', 'foo'], - qr/archive location .* does not exist/, - 'fails if archive location does not exist'); +command_fails_like( + [ 'pg_archivecleanup', 'notexist', 'foo' ], + qr/archive location .* does not exist/, + 'fails if archive location does not exist'); -command_fails_like(['pg_archivecleanup', $tempdir, 'foo', 'bar'], - qr/too many 
command-line arguments/, - 'fails with too many command-line arguments'); +command_fails_like( + [ 'pg_archivecleanup', $tempdir, 'foo', 'bar' ], + qr/too many command-line arguments/, + 'fails with too many command-line arguments'); -command_fails_like(['pg_archivecleanup', $tempdir, 'foo'], - qr/invalid file name argument/, - 'fails with invalid restart file name'); +command_fails_like( + [ 'pg_archivecleanup', $tempdir, 'foo' ], + qr/invalid file name argument/, + 'fails with invalid restart file name'); { # like command_like but checking stderr my $stderr; - my $result = IPC::Run::run ['pg_archivecleanup', '-d', '-n', $tempdir, $walfiles[2]], '2>', \$stderr; + my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir, + $walfiles[2] ], '2>', \$stderr; ok($result, "pg_archivecleanup dry run: exit code 0"); - like($stderr, qr/$walfiles[1].*would be removed/, "pg_archivecleanup dry run: matches"); + like( + $stderr, + qr/$walfiles[1].*would be removed/, + "pg_archivecleanup dry run: matches"); foreach my $fn (@walfiles) { ok(-f "$tempdir/$fn", "$fn not removed"); @@ -66,16 +72,23 @@ sub run_check create_files(); - command_ok(['pg_archivecleanup', '-x', '.gz', $tempdir, $walfiles[2] . $suffix], - "$test_name: runs"); - - ok(! -f "$tempdir/$walfiles[0]", "$test_name: first older WAL file was cleaned up"); - ok(! -f "$tempdir/$walfiles[1]", "$test_name: second older WAL file was cleaned up"); - ok(-f "$tempdir/$walfiles[2]", "$test_name: restartfile was not cleaned up"); - ok(-f "$tempdir/$walfiles[3]", "$test_name: newer WAL file was not cleaned up"); - ok(-f "$tempdir/unrelated_file", "$test_name: unrelated file was not cleaned up"); + command_ok( + [ 'pg_archivecleanup', '-x', '.gz', $tempdir, + $walfiles[2] . $suffix ], + "$test_name: runs"); + + ok(!-f "$tempdir/$walfiles[0]", + "$test_name: first older WAL file was cleaned up"); + ok(!-f "$tempdir/$walfiles[1]", + "$test_name: second older WAL file was cleaned up"); + ok(-f "$tempdir/$walfiles[2]", + "$test_name: restartfile was not cleaned up"); + ok(-f "$tempdir/$walfiles[3]", + "$test_name: newer WAL file was not cleaned up"); + ok(-f "$tempdir/unrelated_file", + "$test_name: unrelated file was not cleaned up"); } -run_check('', 'pg_archivecleanup'); -run_check('.partial', 'pg_archivecleanup with .partial file'); +run_check('', 'pg_archivecleanup'); +run_check('.partial', 'pg_archivecleanup with .partial file'); run_check('.00000020.backup', 'pg_archivecleanup with .backup file'); diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 0cd510eeea..1c80ff7491 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -20,7 +20,7 @@ my $node = get_new_node('main'); umask(0077); # Initialize node without replication settings -$node->init(extra => [ '--data-checksums' ]); +$node->init(extra => ['--data-checksums']); $node->start; my $pgdata = $node->data_dir; @@ -47,7 +47,7 @@ ok(!-d "$tempdir/backup", 'backup directory was cleaned up'); # Create a backup directory that is not empty so the next commnd will fail # but leave the data directory behind mkdir("$tempdir/backup") - or BAIL_OUT("unable to create $tempdir/backup"); + or BAIL_OUT("unable to create $tempdir/backup"); append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data"); $node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ], @@ -86,13 +86,14 @@ my $baseUnloggedPath = $node->safe_psql('postgres', # Make sure main and init forks exist 
ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base'); -ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base'); +ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base'); # Create files that look like temporary relations to ensure they are ignored. my $postgresOid = $node->safe_psql('postgres', q{select oid from pg_database where datname = 'postgres'}); -my @tempRelationFiles = qw(t999_999 t9999_999.1 t999_9999_vm t99999_99999_vm.1); +my @tempRelationFiles = + qw(t999_999 t9999_999.1 t999_9999_vm t99999_99999_vm.1); foreach my $filename (@tempRelationFiles) { @@ -107,10 +108,11 @@ ok(-f "$tempdir/backup/PG_VERSION", 'backup was created'); # Permissions on backup should be default SKIP: { - skip "unix-style permissions not supported on Windows", 1 if ($windows_os); + skip "unix-style permissions not supported on Windows", 1 + if ($windows_os); ok(check_mode_recursive("$tempdir/backup", 0700, 0600), - "check backup dir permissions"); + "check backup dir permissions"); } # Only archive_status directory should be copied in pg_wal/. @@ -133,8 +135,7 @@ foreach my $dirname ( # These files should not be copied. foreach my $filename ( qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp - global/pg_internal.init) - ) + global/pg_internal.init)) { ok(!-f "$tempdir/backup/$filename", "$filename not copied"); } @@ -142,14 +143,14 @@ foreach my $filename ( # Unlogged relation forks other than init should not be copied ok(-f "$tempdir/backup/${baseUnloggedPath}_init", 'unlogged init fork in backup'); -ok(!-f "$tempdir/backup/$baseUnloggedPath", +ok( !-f "$tempdir/backup/$baseUnloggedPath", 'unlogged main fork not in backup'); # Temp relations should not be copied. foreach my $filename (@tempRelationFiles) { - ok(!-f "$tempdir/backup/base/$postgresOid/$filename", - "base/$postgresOid/$filename not copied"); + ok( !-f "$tempdir/backup/base/$postgresOid/$filename", + "base/$postgresOid/$filename not copied"); } # Make sure existing backup_label was ignored. @@ -212,7 +213,7 @@ SKIP: # Move pg_replslot out of $pgdata and create a symlink to it. $node->stop; - # Set umask so test directories and files are created with group permissions + # Set umask so test directories and files are created with group permissions umask(0027); # Enable group permissions on PGDATA @@ -244,24 +245,27 @@ SKIP: is(scalar(@tblspc_tars), 1, 'one tablespace tar was created'); rmtree("$tempdir/tarbackup2"); - # Create an unlogged table to test that forks other than init are not copied. + # Create an unlogged table to test that forks other than init are not copied. $node->safe_psql('postgres', - 'CREATE UNLOGGED TABLE tblspc1_unlogged (id int) TABLESPACE tblspc1;'); + 'CREATE UNLOGGED TABLE tblspc1_unlogged (id int) TABLESPACE tblspc1;' + ); - my $tblspc1UnloggedPath = $node->safe_psql( - 'postgres', q{select pg_relation_filepath('tblspc1_unlogged')}); + my $tblspc1UnloggedPath = $node->safe_psql('postgres', + q{select pg_relation_filepath('tblspc1_unlogged')}); # Make sure main and init forks exist - ok(-f "$pgdata/${tblspc1UnloggedPath}_init", + ok( -f "$pgdata/${tblspc1UnloggedPath}_init", 'unlogged init fork in tablespace'); - ok(-f "$pgdata/$tblspc1UnloggedPath", - 'unlogged main fork in tablespace'); + ok(-f "$pgdata/$tblspc1UnloggedPath", 'unlogged main fork in tablespace'); - # Create files that look like temporary relations to ensure they are ignored - # in a tablespace. 
+ # Create files that look like temporary relations to ensure they are ignored + # in a tablespace. my @tempRelationFiles = qw(t888_888 t888888_888888_vm.1); - my $tblSpc1Id = basename(dirname(dirname($node->safe_psql('postgres', - q{select pg_relation_filepath('test1')})))); + my $tblSpc1Id = basename( + dirname( + dirname( + $node->safe_psql( + 'postgres', q{select pg_relation_filepath('test1')})))); foreach my $filename (@tempRelationFiles) { @@ -284,16 +288,17 @@ SKIP: -l "$tempdir/backup1/pg_tblspc/$_" and readlink "$tempdir/backup1/pg_tblspc/$_" eq "$tempdir/tbackup/tblspc1" - } readdir($dh)), + } readdir($dh)), "tablespace symlink was updated"); closedir $dh; # Group access should be enabled on all backup files ok(check_mode_recursive("$tempdir/backup1", 0750, 0640), - "check backup dir permissions"); + "check backup dir permissions"); # Unlogged relation forks other than init should not be copied - my ($tblspc1UnloggedBackupPath) = $tblspc1UnloggedPath =~ /[^\/]*\/[^\/]*\/[^\/]*$/g; + my ($tblspc1UnloggedBackupPath) = + $tblspc1UnloggedPath =~ /[^\/]*\/[^\/]*\/[^\/]*$/g; ok(-f "$tempdir/tbackup/tblspc1/${tblspc1UnloggedBackupPath}_init", 'unlogged init fork in tablespace backup'); @@ -303,15 +308,15 @@ SKIP: # Temp relations should not be copied. foreach my $filename (@tempRelationFiles) { - ok(!-f "$tempdir/tbackup/tblspc1/$tblSpc1Id/$postgresOid/$filename", - "[tblspc1]/$postgresOid/$filename not copied"); + ok( !-f "$tempdir/tbackup/tblspc1/$tblSpc1Id/$postgresOid/$filename", + "[tblspc1]/$postgresOid/$filename not copied"); # Also remove temp relation files or tablespace drop will fail. my $filepath = - "$shorter_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename"; + "$shorter_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename"; unlink($filepath) - or BAIL_OUT("unable to unlink $filepath"); + or BAIL_OUT("unable to unlink $filepath"); } ok( -d "$tempdir/backup1/pg_replslot", @@ -398,27 +403,35 @@ $node->command_fails( 'pg_basebackup fails with nonexistent replication slot'); $node->command_fails( - [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C' ], + [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C' ], 'pg_basebackup -C fails without slot name'); $node->command_fails( - [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0', '--no-slot' ], + [ 'pg_basebackup', '-D', + "$tempdir/backupxs_slot", '-C', + '-S', 'slot0', + '--no-slot' ], 'pg_basebackup fails with -C -S --no-slot'); $node->command_ok( - [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ], + [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ], 'pg_basebackup -C runs'); rmtree("$tempdir/backupxs_slot"); -is($node->safe_psql('postgres', q{SELECT slot_name FROM pg_replication_slots WHERE slot_name = 'slot0'}), - 'slot0', - 'replication slot was created'); -isnt($node->safe_psql('postgres', q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot0'}), - '', - 'restart LSN of new slot is not null'); +is( $node->safe_psql( + 'postgres', +q{SELECT slot_name FROM pg_replication_slots WHERE slot_name = 'slot0'}), + 'slot0', + 'replication slot was created'); +isnt( + $node->safe_psql( + 'postgres', +q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot0'}), + '', + 'restart LSN of new slot is not null'); $node->command_fails( - [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ], + [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ], 'pg_basebackup fails with -C -S and a previously 
existing slot'); $node->safe_psql('postgres', @@ -455,10 +468,10 @@ rmtree("$tempdir/backupxs_sl_R"); # create tables to corrupt and get their relfilenodes my $file_corrupt1 = $node->safe_psql('postgres', - q{SELECT a INTO corrupt1 FROM generate_series(1,10000) AS a; ALTER TABLE corrupt1 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt1')} +q{SELECT a INTO corrupt1 FROM generate_series(1,10000) AS a; ALTER TABLE corrupt1 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt1')} ); my $file_corrupt2 = $node->safe_psql('postgres', - q{SELECT b INTO corrupt2 FROM generate_series(1,2) AS b; ALTER TABLE corrupt2 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt2')} +q{SELECT b INTO corrupt2 FROM generate_series(1,2) AS b; ALTER TABLE corrupt2 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt2')} ); # set page header and block sizes @@ -473,31 +486,32 @@ syswrite($file, '\0\0\0\0\0\0\0\0\0'); close $file; system_or_bail 'pg_ctl', '-D', $pgdata, 'start'; -$node->command_checks_all([ 'pg_basebackup', '-D', "$tempdir/backup_corrupt"], +$node->command_checks_all( + [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt" ], 1, [qr{^$}], [qr/^WARNING.*checksum verification failed/s], - 'pg_basebackup reports checksum mismatch' -); + 'pg_basebackup reports checksum mismatch'); rmtree("$tempdir/backup_corrupt"); # induce further corruption in 5 more blocks system_or_bail 'pg_ctl', '-D', $pgdata, 'stop'; open $file, '+<', "$pgdata/$file_corrupt1"; -for my $i ( 1..5 ) { - my $offset = $pageheader_size + $i * $block_size; - seek($file, $offset, 0); - syswrite($file, '\0\0\0\0\0\0\0\0\0'); +for my $i (1 .. 5) +{ + my $offset = $pageheader_size + $i * $block_size; + seek($file, $offset, 0); + syswrite($file, '\0\0\0\0\0\0\0\0\0'); } close $file; system_or_bail 'pg_ctl', '-D', $pgdata, 'start'; -$node->command_checks_all([ 'pg_basebackup', '-D', "$tempdir/backup_corrupt2"], - 1, - [qr{^$}], - [qr/^WARNING.*further.*failures.*will.not.be.reported/s], - 'pg_basebackup does not report more than 5 checksum mismatches' -); +$node->command_checks_all( + [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt2" ], + 1, + [qr{^$}], + [qr/^WARNING.*further.*failures.*will.not.be.reported/s], + 'pg_basebackup does not report more than 5 checksum mismatches'); rmtree("$tempdir/backup_corrupt2"); # induce corruption in a second file @@ -508,17 +522,17 @@ syswrite($file, '\0\0\0\0\0\0\0\0\0'); close $file; system_or_bail 'pg_ctl', '-D', $pgdata, 'start'; -$node->command_checks_all([ 'pg_basebackup', '-D', "$tempdir/backup_corrupt3"], - 1, - [qr{^$}], - [qr/^WARNING.*7 total checksum verification failures/s], - 'pg_basebackup correctly report the total number of checksum mismatches' -); +$node->command_checks_all( + [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt3" ], + 1, + [qr{^$}], + [qr/^WARNING.*7 total checksum verification failures/s], + 'pg_basebackup correctly report the total number of checksum mismatches'); rmtree("$tempdir/backup_corrupt3"); # do not verify checksums, should return ok $node->command_ok( - [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt4", '-k' ], + [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt4", '-k' ], 'pg_basebackup with -k does not report checksum mismatch'); rmtree("$tempdir/backup_corrupt4"); diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl index 19c106d9f5..0793f9c115 100644 --- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl +++ 
b/src/bin/pg_basebackup/t/020_pg_receivewal.pl @@ -41,7 +41,8 @@ is($slot->{'slot_type'}, 'physical', 'physical replication slot was created'); is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null'); $primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ], 'dropping a replication slot'); -is($primary->slot($slot_name)->{'slot_type'}, '', 'replication slot was removed'); +is($primary->slot($slot_name)->{'slot_type'}, + '', 'replication slot was removed'); # Generate some WAL. Use --synchronous at the same time to add more # code coverage. Switch to the next segment first so that subsequent @@ -63,8 +64,9 @@ $primary->command_ok( # Permissions on WAL files should be default SKIP: { - skip "unix-style permissions not supported on Windows", 1 if ($windows_os); + skip "unix-style permissions not supported on Windows", 1 + if ($windows_os); ok(check_mode_recursive($stream_dir, 0700, 0600), - "check stream dir permissions"); + "check stream dir permissions"); } diff --git a/src/bin/pg_controldata/t/001_pg_controldata.pl b/src/bin/pg_controldata/t/001_pg_controldata.pl index af9fad7a38..c641c9c5f7 100644 --- a/src/bin/pg_controldata/t/001_pg_controldata.pl +++ b/src/bin/pg_controldata/t/001_pg_controldata.pl @@ -21,17 +21,20 @@ command_like([ 'pg_controldata', $node->data_dir ], # check with a corrupted pg_control my $pg_control = $node->data_dir . '/global/pg_control'; -my $size = (stat($pg_control))[7]; +my $size = (stat($pg_control))[7]; open my $fh, '>', $pg_control or BAIL_OUT($!); binmode $fh; + # fill file with zeros print $fh pack("x[$size]"); close $fh; -command_checks_all([ 'pg_controldata', $node->data_dir ], - 0, - [ qr/WARNING: Calculated CRC checksum does not match value stored in file/, - qr/WARNING: invalid WAL segment size/ ], - [ qr/^$/ ], - 'pg_controldata with corrupted pg_control'); +command_checks_all( + [ 'pg_controldata', $node->data_dir ], + 0, + [ +qr/WARNING: Calculated CRC checksum does not match value stored in file/, + qr/WARNING: invalid WAL segment size/ ], + [qr/^$/], + 'pg_controldata with corrupted pg_control'); diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl index 2f9dfa7b81..5bbb7997c3 100644 --- a/src/bin/pg_ctl/t/001_start_stop.pl +++ b/src/bin/pg_ctl/t/001_start_stop.pl @@ -63,14 +63,14 @@ command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], # Windows but we still want to do the restart test. my $logFileName = "$tempdir/data/perm-test-600.log"; -command_ok( - [ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ], +command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ], 'pg_ctl restart with server not running'); # Permissions on log file should be default SKIP: { - skip "unix-style permissions not supported on Windows", 2 if ($windows_os); + skip "unix-style permissions not supported on Windows", 2 + if ($windows_os); ok(-f $logFileName); ok(check_mode_recursive("$tempdir/data", 0700, 0600)); diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 25852b903c..5202226f08 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -301,28 +301,26 @@ my %pgdump_runs = ( # Tests which target the 'dump_test' schema, specifically. 
my %dump_test_schema_runs = ( - only_dump_test_schema => 1, - test_schema_plus_blobs => 1, -); + only_dump_test_schema => 1, + test_schema_plus_blobs => 1,); # Tests which are considered 'full' dumps by pg_dump, but there # are flags used to exclude specific items (ACLs, blobs, etc). my %full_runs = ( - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + createdb => 1, + defaults => 1, exclude_dump_test_schema => 1, - exclude_test_table => 1, - exclude_test_table_data => 1, - no_blobs => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_dbprivs => 1, - schema_only => 1, - with_oids => 1, -); + exclude_test_table => 1, + exclude_test_table_data => 1, + no_blobs => 1, + no_owner => 1, + no_privs => 1, + pg_dumpall_dbprivs => 1, + schema_only => 1, + with_oids => 1,); # This is where the actual tests are defined. my %tests = ( @@ -336,13 +334,11 @@ my %tests = ( \QFOR ROLE regress_dump_test_role IN SCHEMA dump_test \E \QGRANT SELECT ON TABLES TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_post_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE' => { create_order => 55, @@ -354,11 +350,8 @@ my %tests = ( \QFOR ROLE regress_dump_test_role \E \QREVOKE ALL ON FUNCTIONS FROM PUBLIC;\E /xm, - like => { - %full_runs, - section_post_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_post_data => 1, }, + unlike => { no_privs => 1, }, }, 'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT' => { @@ -374,14 +367,11 @@ my %tests = ( \QFOR ROLE regress_dump_test_role \E \QGRANT INSERT,REFERENCES,DELETE,TRIGGER,TRUNCATE,UPDATE ON TABLES TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - section_post_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_post_data => 1, }, + unlike => { no_privs => 1, }, }, 'ALTER ROLE regress_dump_test_role' => { - regexp => qr/^ + regexp => qr/^ \QALTER ROLE regress_dump_test_role WITH \E \QNOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN \E \QNOREPLICATION NOBYPASSRLS;\E @@ -394,54 +384,40 @@ my %tests = ( 'ALTER COLLATION test0 OWNER TO' => { regexp => qr/^ALTER COLLATION public.test0 OWNER TO .*;/m, collation => 1, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - %dump_test_schema_runs, - no_owner => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { %dump_test_schema_runs, no_owner => 1, }, }, 'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => { - regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .*;/m, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_owner => 1, }, }, + regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .*;/m, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER SERVER s1 OWNER TO' => { - regexp => qr/^ALTER SERVER s1 OWNER TO .*;/m, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_owner => 1, }, }, + regexp => qr/^ALTER SERVER s1 OWNER TO .*;/m, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER FUNCTION dump_test.pltestlang_call_handler() OWNER TO' => { - regexp => qr/^ + regexp => qr/^ \QALTER FUNCTION dump_test.pltestlang_call_handler() \E 
\QOWNER TO \E .*;/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER OPERATOR FAMILY dump_test.op_family OWNER TO' => { - regexp => qr/^ + regexp => qr/^ \QALTER OPERATOR FAMILY dump_test.op_family USING btree \E \QOWNER TO \E .*;/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER OPERATOR FAMILY dump_test.op_family USING btree' => { create_order => 75, @@ -464,109 +440,95 @@ my %tests = ( \QFUNCTION 1 (integer, integer) btint4cmp(integer,integer) ,\E\n\s+ \QFUNCTION 2 (integer, integer) btint4sortsupport(internal);\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'ALTER OPERATOR CLASS dump_test.op_class OWNER TO' => { - regexp => qr/^ + regexp => qr/^ \QALTER OPERATOR CLASS dump_test.op_class USING btree \E \QOWNER TO \E .*;/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER PUBLICATION pub1 OWNER TO' => { - regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .*;/m, - like => { - %full_runs, - section_post_data => 1, }, - unlike => { - no_owner => 1, }, }, + regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .*;/m, + like => { %full_runs, section_post_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER LARGE OBJECT ... 
OWNER TO' => { - regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m, - like => { + regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m, + like => { %full_runs, - column_inserts => 1, - data_only => 1, - section_pre_data => 1, - test_schema_plus_blobs => 1, }, + column_inserts => 1, + data_only => 1, + section_pre_data => 1, + test_schema_plus_blobs => 1, }, unlike => { - no_blobs => 1, - no_owner => 1, + no_blobs => 1, + no_owner => 1, schema_only => 1, }, }, 'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => { - regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .*;/m, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_owner => 1, }, }, + regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .*;/m, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER SCHEMA dump_test OWNER TO' => { - regexp => qr/^ALTER SCHEMA dump_test OWNER TO .*;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + regexp => qr/^ALTER SCHEMA dump_test OWNER TO .*;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER SCHEMA dump_test_second_schema OWNER TO' => { - regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .*;/m, - like => { + regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .*;/m, + like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_owner => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER SEQUENCE test_table_col1_seq' => { - regexp => qr/^ + regexp => qr/^ \QALTER SEQUENCE dump_test.test_table_col1_seq OWNED BY dump_test.test_table.col1;\E /xm, like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER SEQUENCE test_third_table_col1_seq' => { - regexp => qr/^ + regexp => qr/^ \QALTER SEQUENCE dump_test_second_schema.test_third_table_col1_seq OWNED BY dump_test_second_schema.test_third_table.col1;\E /xm, like => { %full_runs, - role => 1, - section_pre_data => 1, }, }, + role => 1, + section_pre_data => 1, }, }, 'ALTER TABLE ONLY test_table ADD CONSTRAINT ... 
PRIMARY KEY' => { - regexp => qr/^ + regexp => qr/^ \QALTER TABLE ONLY dump_test.test_table\E \n^\s+ \QADD CONSTRAINT test_table_pkey PRIMARY KEY (col1);\E /xm, like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE ONLY test_table ALTER COLUMN col1 SET STATISTICS 90' => { create_order => 93, @@ -578,11 +540,11 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE ONLY test_table ALTER COLUMN col2 SET STORAGE' => { create_order => 94, @@ -594,11 +556,11 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE ONLY test_table ALTER COLUMN col3 SET STORAGE' => { create_order => 95, @@ -610,11 +572,11 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE ONLY test_table ALTER COLUMN col4 SET n_distinct' => { create_order => 95, @@ -626,19 +588,19 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2' => { - regexp => qr/^ + regexp => qr/^ \QALTER TABLE ONLY dump_test.measurement ATTACH PARTITION dump_test_second_schema.measurement_y2006m2 \E \QFOR VALUES FROM ('2006-02-01') TO ('2006-03-01');\E\n /xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'ALTER TABLE test_table CLUSTER ON test_table_pkey' => { create_order => 96, @@ -650,126 +612,115 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE test_table DISABLE TRIGGER ALL' => { - regexp => qr/^ + regexp => qr/^ \QSET SESSION AUTHORIZATION 'test_superuser';\E\n\n \QALTER TABLE dump_test.test_table DISABLE TRIGGER ALL;\E\n\n \QCOPY dump_test.test_table (col1, col2, col3, col4) FROM stdin;\E \n(?:\d\t\\N\t\\N\t\\N\n){9}\\\.\n\n\n \QALTER TABLE dump_test.test_table ENABLE TRIGGER ALL;\E/xm, - like => { data_only => 1, }, }, + like => { data_only => 1, }, }, 'ALTER FOREIGN TABLE foreign_table ALTER COLUMN c1 OPTIONS' => { - regexp => qr/^ + regexp => qr/^ \QALTER FOREIGN TABLE dump_test.foreign_table ALTER COLUMN c1 OPTIONS (\E\n \s+\Qcolumn_name 'col1'\E\n \Q);\E\n /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { 
%full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'ALTER TABLE test_table OWNER TO' => { - regexp => qr/^ALTER TABLE dump_test.test_table OWNER TO .*;/m, - like => { + regexp => qr/^ALTER TABLE dump_test.test_table OWNER TO .*;/m, + like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - no_owner => 1, }, }, + exclude_test_table => 1, + no_owner => 1, }, }, 'ALTER TABLE test_table ENABLE ROW LEVEL SECURITY' => { create_order => 23, create_sql => 'ALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;', - regexp => qr/^ALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;/m, - like => { + regexp => + qr/^ALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;/m, + like => { %full_runs, %dump_test_schema_runs, only_dump_test_table => 1, - section_post_data => 1, }, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'ALTER TABLE test_second_table OWNER TO' => { - regexp => qr/^ALTER TABLE dump_test.test_second_table OWNER TO .*;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + regexp => qr/^ALTER TABLE dump_test.test_second_table OWNER TO .*;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER TABLE test_third_table OWNER TO' => { - regexp => qr/^ALTER TABLE dump_test_second_schema.test_third_table OWNER TO .*;/m, - like => { + regexp => +qr/^ALTER TABLE dump_test_second_schema.test_third_table OWNER TO .*;/m, + like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_owner => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER TABLE measurement OWNER TO' => { - regexp => qr/^ALTER TABLE dump_test.measurement OWNER TO .*;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + regexp => qr/^ALTER TABLE dump_test.measurement OWNER TO .*;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER TABLE measurement_y2006m2 OWNER TO' => { - regexp => qr/^ALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO .*;/m, - like => { + regexp => +qr/^ALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO .*;/m, + like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_owner => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { no_owner => 1, }, }, 'ALTER FOREIGN TABLE foreign_table OWNER TO' => { - regexp => qr/^ALTER FOREIGN TABLE dump_test.foreign_table OWNER TO .*;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + regexp => + qr/^ALTER FOREIGN TABLE dump_test.foreign_table OWNER TO .*;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO' => { regexp => - qr/^ALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 OWNER TO .*;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, +qr/^ALTER TEXT SEARCH CONFIGURATION 
dump_test.alt_ts_conf1 OWNER TO .*;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_owner => 1, }, }, + no_owner => 1, }, }, 'ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO' => { regexp => - qr/^ALTER TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 OWNER TO .*;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, +qr/^ALTER TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 OWNER TO .*;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, only_dump_test_table => 1, @@ -783,16 +734,16 @@ my %tests = ( regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m, like => { %full_runs, - column_inserts => 1, - data_only => 1, - section_pre_data => 1, - test_schema_plus_blobs => 1, }, + column_inserts => 1, + data_only => 1, + section_pre_data => 1, + test_schema_plus_blobs => 1, }, unlike => { schema_only => 1, - no_blobs => 1, }, }, + no_blobs => 1, }, }, 'BLOB load (using lo_from_bytea)' => { - regexp => qr/^ + regexp => qr/^ \QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n \QSELECT pg_catalog.lowrite(0, \E \Q'\x310a320a330a340a350a360a370a380a390a');\E\n @@ -800,38 +751,41 @@ my %tests = ( /xm, like => { %full_runs, - column_inserts => 1, - data_only => 1, - section_data => 1, - test_schema_plus_blobs => 1, }, + column_inserts => 1, + data_only => 1, + section_data => 1, + test_schema_plus_blobs => 1, }, unlike => { binary_upgrade => 1, - no_blobs => 1, - schema_only => 1, }, }, + no_blobs => 1, + schema_only => 1, }, }, 'COMMENT ON DATABASE postgres' => { - regexp => qr/^COMMENT ON DATABASE postgres IS .*;/m, + regexp => qr/^COMMENT ON DATABASE postgres IS .*;/m, + # Should appear in the same tests as "CREATE DATABASE postgres" - like => { createdb => 1, }, }, + like => { createdb => 1, }, }, 'COMMENT ON EXTENSION plpgsql' => { - regexp => qr/^COMMENT ON EXTENSION plpgsql IS .*;/m, + regexp => qr/^COMMENT ON EXTENSION plpgsql IS .*;/m, + # this shouldn't ever get emitted anymore - like => {}, }, + like => {}, }, 'COMMENT ON TABLE dump_test.test_table' => { create_order => 36, create_sql => 'COMMENT ON TABLE dump_test.test_table IS \'comment on table\';', - regexp => qr/^COMMENT ON TABLE dump_test.test_table IS 'comment on table';/m, - like => { + regexp => + qr/^COMMENT ON TABLE dump_test.test_table IS 'comment on table';/m, + like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'COMMENT ON COLUMN dump_test.test_table.col1' => { create_order => 36, @@ -843,11 +797,11 @@ my %tests = ( like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'COMMENT ON COLUMN dump_test.composite.f1' => { create_order => 44, @@ -856,12 +810,9 @@ my %tests = ( regexp => qr/^ \QCOMMENT ON COLUMN dump_test.composite.f1 IS 'comment on column of type';\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON 
COLUMN dump_test.test_second_table.col1' => { create_order => 63, @@ -870,12 +821,9 @@ my %tests = ( regexp => qr/^ \QCOMMENT ON COLUMN dump_test.test_second_table.col1 IS 'comment on column col1';\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON COLUMN dump_test.test_second_table.col2' => { create_order => 64, @@ -884,12 +832,9 @@ my %tests = ( regexp => qr/^ \QCOMMENT ON COLUMN dump_test.test_second_table.col2 IS 'comment on column col2';\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON CONVERSION dump_test.test_conversion' => { create_order => 79, @@ -897,23 +842,18 @@ my %tests = ( IS \'comment on test conversion\';', regexp => qr/^COMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversion';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON COLLATION test0' => { create_order => 77, create_sql => 'COMMENT ON COLLATION test0 IS \'comment on test0 collation\';', regexp => - qr/^COMMENT ON COLLATION public.test0 IS 'comment on test0 collation';/m, +qr/^COMMENT ON COLLATION public.test0 IS 'comment on test0 collation';/m, collation => 1, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'COMMENT ON LARGE OBJECT ...' 
=> { create_order => 65, @@ -929,12 +869,12 @@ qr/^COMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversi /xm, like => { %full_runs, - column_inserts => 1, - data_only => 1, - section_pre_data => 1, - test_schema_plus_blobs => 1, }, + column_inserts => 1, + data_only => 1, + section_pre_data => 1, + test_schema_plus_blobs => 1, }, unlike => { - no_blobs => 1, + no_blobs => 1, schema_only => 1, }, }, 'COMMENT ON PUBLICATION pub1' => { @@ -943,9 +883,7 @@ qr/^COMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversi IS \'comment on publication\';', regexp => qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m, - like => { - %full_runs, - section_post_data => 1, }, }, + like => { %full_runs, section_post_data => 1, }, }, 'COMMENT ON SUBSCRIPTION sub1' => { create_order => 55, @@ -953,9 +891,7 @@ qr/^COMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversi IS \'comment on subscription\';', regexp => qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m, - like => { - %full_runs, - section_post_data => 1, }, }, + like => { %full_runs, section_post_data => 1, }, }, 'COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => { create_order => 84, @@ -964,12 +900,9 @@ qr/^COMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversi IS \'comment on text search configuration\';', regexp => qr/^COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 IS 'comment on text search configuration';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => { create_order => 84, @@ -978,12 +911,9 @@ qr/^COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 IS 'comment on t IS \'comment on text search dictionary\';', regexp => qr/^COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 IS 'comment on text search dictionary';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1' => { create_order => 84, @@ -991,12 +921,9 @@ qr/^COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 IS 'comment on text IS \'comment on text search parser\';', regexp => qr/^COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 IS 'comment on text search parser';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => { create_order => 84, @@ -1004,61 +931,49 @@ qr/^COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 IS 'comment on text sear IS \'comment on text search template\';', regexp => qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text search template';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 
1, }, }, 'COMMENT ON TYPE dump_test.planets - ENUM' => { create_order => 68, create_sql => 'COMMENT ON TYPE dump_test.planets IS \'comment on enum type\';', - regexp => qr/^COMMENT ON TYPE dump_test.planets IS 'comment on enum type';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + regexp => + qr/^COMMENT ON TYPE dump_test.planets IS 'comment on enum type';/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON TYPE dump_test.textrange - RANGE' => { create_order => 69, create_sql => 'COMMENT ON TYPE dump_test.textrange IS \'comment on range type\';', - regexp => qr/^COMMENT ON TYPE dump_test.textrange IS 'comment on range type';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + regexp => +qr/^COMMENT ON TYPE dump_test.textrange IS 'comment on range type';/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON TYPE dump_test.int42 - Regular' => { create_order => 70, create_sql => 'COMMENT ON TYPE dump_test.int42 IS \'comment on regular type\';', - regexp => qr/^COMMENT ON TYPE dump_test.int42 IS 'comment on regular type';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + regexp => + qr/^COMMENT ON TYPE dump_test.int42 IS 'comment on regular type';/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COMMENT ON TYPE dump_test.undefined - Undefined' => { create_order => 71, create_sql => 'COMMENT ON TYPE dump_test.undefined IS \'comment on undefined type\';', regexp => - qr/^COMMENT ON TYPE dump_test.undefined IS 'comment on undefined type';/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, +qr/^COMMENT ON TYPE dump_test.undefined IS 'comment on undefined type';/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'COPY test_table' => { create_order => 4, @@ -1071,15 +986,15 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s like => { %full_runs, %dump_test_schema_runs, - data_only => 1, - only_dump_test_table => 1, - section_data => 1, }, + data_only => 1, + only_dump_test_table => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - exclude_test_table => 1, - exclude_test_table_data => 1, - schema_only => 1, }, }, + exclude_test_table => 1, + exclude_test_table_data => 1, + schema_only => 1, }, }, 'COPY fk_reference_test_table' => { create_order => 22, @@ -1097,21 +1012,21 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s exclude_test_table_data => 1, section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, # In a data-only dump, we try to actually order according to FKs, # so this check is just making sure that the referring table comes after # the referred-to table. 
'COPY fk_reference_test_table second' => { - regexp => qr/^ + regexp => qr/^ \QCOPY dump_test.test_table (col1, col2, col3, col4) FROM stdin;\E \n(?:\d\t\\N\t\\N\t\\N\n){9}\\\.\n.* \QCOPY dump_test.fk_reference_test_table (col1) FROM stdin;\E \n(?:\d\n){5}\\\.\n /xms, - like => { data_only => 1, }, }, + like => { data_only => 1, }, }, 'COPY test_second_table' => { create_order => 7, @@ -1125,12 +1040,12 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s like => { %full_runs, %dump_test_schema_runs, - data_only => 1, - section_data => 1, }, + data_only => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, 'COPY test_third_table' => { create_order => 12, @@ -1143,21 +1058,21 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s /xm, like => { %full_runs, - data_only => 1, - role => 1, - section_data => 1, }, + data_only => 1, + role => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_test_table_data => 1, - schema_only => 1, - with_oids => 1, }, }, + schema_only => 1, + with_oids => 1, }, }, 'COPY test_third_table WITH OIDS' => { - regexp => qr/^ + regexp => qr/^ \QCOPY dump_test_second_schema.test_third_table (col1) WITH OIDS FROM stdin;\E \n(?:\d+\t\d\n){9}\\\.\n /xm, - like => { with_oids => 1, }, }, + like => { with_oids => 1, }, }, 'COPY test_fourth_table' => { create_order => 7, @@ -1170,12 +1085,12 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s like => { %full_runs, %dump_test_schema_runs, - data_only => 1, - section_data => 1, }, + data_only => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, 'COPY test_fifth_table' => { create_order => 54, @@ -1188,12 +1103,12 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s like => { %full_runs, %dump_test_schema_runs, - data_only => 1, - section_data => 1, }, + data_only => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, 'COPY test_table_identity' => { create_order => 54, @@ -1206,44 +1121,45 @@ qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text s like => { %full_runs, %dump_test_schema_runs, - data_only => 1, - section_data => 1, }, + data_only => 1, + section_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, 'INSERT INTO test_table' => { - regexp => qr/^ + regexp => qr/^ (?:INSERT\ INTO\ dump_test.test_table\ \(col1,\ col2,\ col3,\ col4\)\ VALUES\ \(\d,\ NULL,\ NULL,\ NULL\);\n){9} /xm, - like => { column_inserts => 1, }, }, + like => { column_inserts => 1, }, }, 'INSERT INTO test_second_table' => { - regexp => qr/^ + regexp => qr/^ (?:INSERT\ INTO\ dump_test.test_second_table\ \(col1,\ col2\) \ VALUES\ \(\d,\ '\d'\);\n){9}/xm, - like => { column_inserts => 1, }, }, + like => { column_inserts => 1, }, }, 'INSERT INTO test_third_table' => { - regexp => qr/^ + regexp => qr/^ (?:INSERT\ INTO\ dump_test_second_schema.test_third_table\ \(col1\) \ VALUES\ \(\d\);\n){9}/xm, - like => { column_inserts => 1, }, }, + like => { column_inserts => 1, }, }, 'INSERT INTO test_fourth_table' => { - regexp 
=> qr/^\QINSERT INTO dump_test.test_fourth_table DEFAULT VALUES;\E/m, - like => { column_inserts => 1, }, }, + regexp => + qr/^\QINSERT INTO dump_test.test_fourth_table DEFAULT VALUES;\E/m, + like => { column_inserts => 1, }, }, 'INSERT INTO test_fifth_table' => { regexp => qr/^\QINSERT INTO dump_test.test_fifth_table (col1, col2, col3, col4, col5) VALUES (NULL, true, false, B'11001', 'NaN');\E/m, - like => { column_inserts => 1, }, }, + like => { column_inserts => 1, }, }, 'INSERT INTO test_table_identity' => { regexp => qr/^\QINSERT INTO dump_test.test_table_identity (col1, col2) OVERRIDING SYSTEM VALUE VALUES (1, 'test');\E/m, - like => { column_inserts => 1, }, }, + like => { column_inserts => 1, }, }, 'CREATE ROLE regress_dump_test_role' => { create_order => 1, @@ -1260,9 +1176,7 @@ qr/^\QINSERT INTO dump_test.test_table_identity (col1, col2) OVERRIDING SYSTEM V 'CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler;', regexp => qr/CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler;/m, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE COLLATION test0 FROM "C"' => { create_order => 76, @@ -1270,9 +1184,7 @@ qr/^\QINSERT INTO dump_test.test_table_identity (col1, col2) OVERRIDING SYSTEM V regexp => qr/^ \QCREATE COLLATION public.test0 (provider = libc, locale = 'C');\E/xm, collation => 1, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE CAST FOR timestamptz' => { create_order => 51, @@ -1280,15 +1192,13 @@ qr/^\QINSERT INTO dump_test.test_table_identity (col1, col2) OVERRIDING SYSTEM V 'CREATE CAST (timestamptz AS interval) WITH FUNCTION age(timestamptz) AS ASSIGNMENT;', regexp => qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog\.age\(timestamp with time zone\) AS ASSIGNMENT;/m, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE DATABASE postgres' => { - regexp => qr/^ + regexp => qr/^ \QCREATE DATABASE postgres WITH TEMPLATE = template0 \E .*;/xm, - like => { createdb => 1, }, }, + like => { createdb => 1, }, }, 'CREATE DATABASE dump_test' => { create_order => 47, @@ -1296,12 +1206,13 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE DATABASE dump_test WITH TEMPLATE = template0 \E .*;/xm, - like => { pg_dumpall_dbprivs => 1, }, }, + like => { pg_dumpall_dbprivs => 1, }, }, 'CREATE EXTENSION ... 
plpgsql' => { - regexp => qr/^ + regexp => qr/^ \QCREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;\E /xm, + # this shouldn't ever get emitted anymore like => {}, }, @@ -1326,10 +1237,9 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - exclude_test_table => 1, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + exclude_test_table => 1, + section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE CONVERSION dump_test.test_conversion' => { create_order => 78, @@ -1337,12 +1247,9 @@ qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog 'CREATE DEFAULT CONVERSION dump_test.test_conversion FOR \'LATIN1\' TO \'UTF8\' FROM iso8859_1_to_utf8;', regexp => qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;\E/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE DOMAIN dump_test.us_postal_code' => { create_order => 29, @@ -1358,12 +1265,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \$\Q'::text) OR (VALUE ~ '^\d{5}-\d{4}\E\$ \Q'::text)));\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE FUNCTION dump_test.pltestlang_call_handler' => { create_order => 17, @@ -1377,12 +1281,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+AS\ \'\$ \Qlibdir\/plpgsql', 'plpgsql_call_handler';\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE FUNCTION dump_test.trigger_func' => { create_order => 30, @@ -1395,12 +1296,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+AS\ \$\$ \Q BEGIN RETURN NULL; END;\E \$\$;/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE FUNCTION dump_test.event_trigger_func' => { create_order => 32, @@ -1413,12 +1311,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+AS\ \$\$ \Q BEGIN RETURN; END;\E \$\$;/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE OPERATOR FAMILY dump_test.op_family' => { create_order => 73, @@ -1427,12 +1322,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' regexp => qr/^ \QCREATE OPERATOR FAMILY dump_test.op_family USING btree;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, 
}, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE OPERATOR CLASS dump_test.op_class' => { create_order => 74, @@ -1457,12 +1349,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \QFUNCTION 1 (bigint, bigint) btint8cmp(bigint,bigint) ,\E\n\s+ \QFUNCTION 2 (bigint, bigint) btint8sortsupport(internal);\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE EVENT TRIGGER test_event_trigger' => { create_order => 33, @@ -1474,9 +1363,7 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \QON ddl_command_start\E \n\s+\QEXECUTE PROCEDURE dump_test.event_trigger_func();\E /xm, - like => { - %full_runs, - section_post_data => 1, }, }, + like => { %full_runs, section_post_data => 1, }, }, 'CREATE TRIGGER test_trigger' => { create_order => 31, @@ -1492,10 +1379,10 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { - exclude_test_table => 1, + exclude_test_table => 1, exclude_dump_test_schema => 1, }, }, 'CREATE TYPE dump_test.planets AS ENUM' => { @@ -1508,16 +1395,14 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+'earth', \n\s+'mars' \n\);/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, }, }, 'CREATE TYPE dump_test.planets AS ENUM pg_upgrade' => { - regexp => qr/^ + regexp => qr/^ \QCREATE TYPE dump_test.planets AS ENUM (\E \n\);.*^ \QALTER TYPE dump_test.planets ADD VALUE 'venus';\E @@ -1526,7 +1411,7 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n.*^ \QALTER TYPE dump_test.planets ADD VALUE 'mars';\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE TYPE dump_test.textrange AS RANGE' => { create_order => 38, @@ -1537,23 +1422,17 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+\Qsubtype = text,\E \n\s+\Qcollation = pg_catalog."C"\E \n\);/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TYPE dump_test.int42' => { create_order => 39, create_sql => 'CREATE TYPE dump_test.int42;', regexp => qr/^CREATE TYPE dump_test.int42;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => { create_order => 80, @@ -1562,15 +1441,12 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' regexp => qr/^ \QCREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 (\E\n 
\s+\QPARSER = pg_catalog."default" );\E/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'ALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 ...' => { - regexp => qr/^ + regexp => qr/^ \QALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1\E\n \s+\QADD MAPPING FOR asciiword WITH english_stem;\E\n \n @@ -1629,12 +1505,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \s+\QADD MAPPING FOR uint WITH simple;\E\n \n /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => { create_order => 81, @@ -1643,12 +1516,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' regexp => qr/^ \QCREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 (\E\n \s+\QLEXIZE = dsimple_lexize );\E/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1' => { create_order => 82, @@ -1661,12 +1531,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \s+\QEND = prsd_end,\E\n \s+\QLEXTYPES = prsd_lextype );\E\n /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => { create_order => 83, @@ -1676,12 +1543,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \QCREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 (\E\n \s+\QTEMPLATE = pg_catalog.simple );\E\n /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE FUNCTION dump_test.int42_in' => { create_order => 40, @@ -1693,12 +1557,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+\QLANGUAGE internal IMMUTABLE STRICT\E \n\s+AS\ \$\$int4in\$\$; /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE FUNCTION dump_test.int42_out' => { create_order => 41, @@ -1710,12 +1571,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+\QLANGUAGE internal IMMUTABLE STRICT\E \n\s+AS\ \$\$int4out\$\$; /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 
'CREATE PROCEDURE dump_test.ptest1' => { create_order => 41, @@ -1726,12 +1584,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+\QLANGUAGE sql\E \n\s+AS\ \$\$\Q INSERT INTO dump_test.test_table (col1) VALUES (a) \E\$\$; /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TYPE dump_test.int42 populated' => { create_order => 42, @@ -1752,12 +1607,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+\QSTORAGE = plain,\E \n\s+PASSEDBYVALUE\n\); /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TYPE dump_test.composite' => { create_order => 43, @@ -1771,39 +1623,29 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \n\s+\Qf2 dump_test.int42\E \n\); /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TYPE dump_test.undefined' => { create_order => 39, create_sql => 'CREATE TYPE dump_test.undefined;', regexp => qr/^CREATE TYPE dump_test.undefined;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE FOREIGN DATA WRAPPER dummy' => { create_order => 35, create_sql => 'CREATE FOREIGN DATA WRAPPER dummy;', regexp => qr/CREATE FOREIGN DATA WRAPPER dummy;/m, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy' => { create_order => 36, create_sql => 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;', regexp => qr/CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;/m, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => { create_order => 88, @@ -1819,12 +1661,9 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' \s+\Qschema_name 'x1'\E\n \Q);\E\n /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1' => { create_order => 86, @@ -1832,9 +1671,7 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;', regexp => qr/CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;/m, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE TRANSFORM FOR int' => { create_order => 34, @@ -1842,9 +1679,7 @@ qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' 'CREATE 
TRANSFORM FOR int LANGUAGE SQL (FROM SQL WITH FUNCTION varchar_transform(internal), TO SQL WITH FUNCTION int4recv(internal));', regexp => qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog\.varchar_transform\(internal\), TO SQL WITH FUNCTION pg_catalog\.int4recv\(internal\)\);/m, - like => { - %full_runs, - section_pre_data => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, }, 'CREATE LANGUAGE pltestlang' => { create_order => 18, @@ -1853,12 +1688,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE PROCEDURAL LANGUAGE pltestlang \E \QHANDLER dump_test.pltestlang_call_handler;\E - /xm, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + /xm, + like => { %full_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE MATERIALIZED VIEW matview' => { create_order => 20, @@ -1870,12 +1702,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\QFROM dump_test.test_table\E \n\s+\QWITH NO DATA;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE MATERIALIZED VIEW matview_second' => { create_order => 21, @@ -1888,12 +1717,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\QFROM dump_test.matview\E \n\s+\QWITH NO DATA;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE MATERIALIZED VIEW matview_third' => { create_order => 58, @@ -1906,12 +1732,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\QFROM dump_test.matview_second\E \n\s+\QWITH NO DATA;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE MATERIALIZED VIEW matview_fourth' => { create_order => 59, @@ -1924,12 +1747,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\QFROM dump_test.matview_third\E \n\s+\QWITH NO DATA;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE POLICY p1 ON test_table' => { create_order => 22, @@ -1943,11 +1763,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE POLICY p2 ON test_table FOR SELECT' => { create_order => 24, @@ -1960,11 +1780,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table 
=> 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE POLICY p3 ON test_table FOR INSERT' => { create_order => 25, @@ -1977,11 +1797,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE POLICY p4 ON test_table FOR UPDATE' => { create_order => 26, @@ -1994,11 +1814,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE POLICY p5 ON test_table FOR DELETE' => { create_order => 27, @@ -2011,11 +1831,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE POLICY p6 ON test_table AS RESTRICTIVE' => { create_order => 27, @@ -2028,11 +1848,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_post_data => 1, }, + only_dump_test_table => 1, + section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE PUBLICATION pub1' => { create_order => 50, @@ -2040,9 +1860,7 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE PUBLICATION pub1 WITH (publish = 'insert, update, delete, truncate');\E /xm, - like => { - %full_runs, - section_post_data => 1, }, }, + like => { %full_runs, section_post_data => 1, }, }, 'CREATE PUBLICATION pub2' => { create_order => 50, @@ -2052,9 +1870,7 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish = '');\E /xm, - like => { - %full_runs, - section_post_data => 1, }, }, + like => { %full_runs, section_post_data => 1, }, }, 'CREATE SUBSCRIPTION sub1' => { create_order => 50, @@ -2064,9 +1880,7 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE SUBSCRIPTION sub1 CONNECTION 'dbname=doesnotexist' PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1');\E /xm, - like => { - %full_runs, - section_post_data => 1, }, }, + like => { %full_runs, section_post_data => 1, }, }, 'ALTER PUBLICATION pub1 ADD TABLE test_table' => { create_order => 51, @@ -2075,12 +1889,10 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QALTER PUBLICATION pub1 ADD TABLE ONLY dump_test.test_table;\E /xm, - like => { - %full_runs, - section_post_data => 1, }, + like => { %full_runs, section_post_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, 
+ exclude_test_table => 1, }, }, 'ALTER PUBLICATION pub1 ADD TABLE test_second_table' => { create_order => 52, @@ -2089,27 +1901,22 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QALTER PUBLICATION pub1 ADD TABLE ONLY dump_test.test_second_table;\E /xm, - like => { - %full_runs, - section_post_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => { %full_runs, section_post_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE SCHEMA public' => { - regexp => qr/^CREATE SCHEMA public;/m, + regexp => qr/^CREATE SCHEMA public;/m, + # this shouldn't ever get emitted anymore - like => {}, }, + like => {}, }, 'CREATE SCHEMA dump_test' => { create_order => 2, create_sql => 'CREATE SCHEMA dump_test;', regexp => qr/^CREATE SCHEMA dump_test;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE SCHEMA dump_test_second_schema' => { create_order => 9, @@ -2117,8 +1924,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^CREATE SCHEMA dump_test_second_schema;/m, like => { %full_runs, - role => 1, - section_pre_data => 1, }, }, + role => 1, + section_pre_data => 1, }, }, 'CREATE TABLE test_table' => { create_order => 3, @@ -2141,11 +1948,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, }, }, + exclude_test_table => 1, }, }, 'CREATE TABLE fk_reference_test_table' => { create_order => 21, @@ -2157,12 +1964,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\Qcol1 integer NOT NULL\E \n\); /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TABLE test_second_table' => { create_order => 6, @@ -2176,12 +1980,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\Qcol2 text\E \n\); /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE UNLOGGED TABLE test_third_table WITH OIDS' => { create_order => 11, @@ -2201,9 +2002,10 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog /xm, like => { %full_runs, - role => 1, - section_pre_data => 1, }, + role => 1, + section_pre_data => 1, }, unlike => { + # FIXME figure out why/how binary upgrade drops OIDs. 
binary_upgrade => 1, }, }, @@ -2226,12 +2028,10 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \)\n \QPARTITION BY RANGE (logdate);\E\n /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, }, }, 'CREATE TABLE measurement_y2006m2 PARTITION OF' => { @@ -2248,10 +2048,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog /xm, like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - binary_upgrade => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { binary_upgrade => 1, }, }, 'CREATE TABLE test_fourth_table_zero_col' => { create_order => 6, @@ -2261,12 +2060,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \QCREATE TABLE dump_test.test_fourth_table (\E \n\); /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TABLE test_fifth_table' => { create_order => 53, @@ -2286,12 +2082,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\Qcol5 double precision\E \n\); /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE TABLE test_table_identity' => { create_order => 3, @@ -2314,12 +2107,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \s+\QCACHE 1\E\n \); /xms, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE STATISTICS extended_stats_no_options' => { create_order => 97, @@ -2328,12 +2118,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE STATISTICS dump_test.test_ext_stats_no_options ON col1, col2 FROM dump_test.test_fifth_table;\E /xms, - like => { - %full_runs, - %dump_test_schema_runs, - section_post_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE STATISTICS extended_stats_options' => { create_order => 97, @@ -2342,15 +2129,12 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QCREATE STATISTICS dump_test.test_ext_stats_opts (ndistinct) ON col1, col2 FROM dump_test.test_fifth_table;\E /xms, - like => { - %full_runs, - %dump_test_schema_runs, - section_post_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE SEQUENCE test_table_col1_seq' => { - regexp => qr/^ + regexp => qr/^ \QCREATE SEQUENCE dump_test.test_table_col1_seq\E \n\s+\QAS integer\E \n\s+\QSTART WITH 1\E @@ -2362,13 +2146,12 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM 
SQL WITH FUNCTION pg_catalog like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + only_dump_test_table => 1, + section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE SEQUENCE test_third_table_col1_seq' => { - regexp => qr/^ + regexp => qr/^ \QCREATE SEQUENCE dump_test_second_schema.test_third_table_col1_seq\E \n\s+\QAS integer\E \n\s+\QSTART WITH 1\E @@ -2379,8 +2162,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog /xm, like => { %full_runs, - role => 1, - section_pre_data => 1, }, }, + role => 1, + section_pre_data => 1, }, }, 'CREATE UNIQUE INDEX test_third_table_idx ON test_third_table' => { create_order => 13, @@ -2392,32 +2175,33 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog /xm, like => { %full_runs, - role => 1, - section_post_data => 1, }, }, + role => 1, + section_post_data => 1, }, }, 'CREATE INDEX ON ONLY measurement' => { create_order => 92, - create_sql => 'CREATE INDEX ON dump_test.measurement (city_id, logdate);', + create_sql => + 'CREATE INDEX ON dump_test.measurement (city_id, logdate);', regexp => qr/^ \QCREATE INDEX measurement_city_id_logdate_idx ON ONLY dump_test.measurement USING\E /xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - exclude_test_table => 1, - exclude_test_table_data => 1, - no_blobs => 1, - no_privs => 1, - no_owner => 1, - only_dump_test_schema => 1, - pg_dumpall_dbprivs => 1, - schema_only => 1, - section_post_data => 1, - test_schema_plus_blobs => 1, - with_oids => 1, }, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + createdb => 1, + defaults => 1, + exclude_test_table => 1, + exclude_test_table_data => 1, + no_blobs => 1, + no_privs => 1, + no_owner => 1, + only_dump_test_schema => 1, + pg_dumpall_dbprivs => 1, + schema_only => 1, + section_post_data => 1, + test_schema_plus_blobs => 1, + with_oids => 1, }, unlike => { exclude_dump_test_schema => 1, only_dump_test_table => 1, @@ -2430,40 +2214,38 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog all_runs => 1, catch_all => 'CREATE ... commands', create_order => 93, - create_sql => 'ALTER TABLE dump_test.measurement ADD PRIMARY KEY (city_id, logdate);', + create_sql => +'ALTER TABLE dump_test.measurement ADD PRIMARY KEY (city_id, logdate);', regexp => qr/^ \QALTER TABLE ONLY dump_test.measurement\E \n^\s+ \QADD CONSTRAINT measurement_pkey PRIMARY KEY (city_id, logdate);\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_post_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'CREATE INDEX ... ON measurement_y2006_m2' => { - regexp => qr/^ + regexp => qr/^ \QCREATE INDEX measurement_y2006m2_city_id_logdate_idx ON dump_test_second_schema.measurement_y2006m2 \E /xm, like => { %full_runs, - role => 1, - section_post_data => 1, }, }, + role => 1, + section_post_data => 1, }, }, 'ALTER INDEX ... ATTACH PARTITION' => { - regexp => qr/^ + regexp => qr/^ \QALTER INDEX dump_test.measurement_city_id_logdate_idx ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_city_id_logdate_idx\E /xm, like => { %full_runs, - role => 1, - section_post_data => 1, }, }, + role => 1, + section_post_data => 1, }, }, 'ALTER INDEX ... 
ATTACH PARTITION (primary key)' => { - all_runs => 1, - catch_all => 'CREATE ... commands', - regexp => qr/^ + all_runs => 1, + catch_all => 'CREATE ... commands', + regexp => qr/^ \QALTER INDEX dump_test.measurement_pkey ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_pkey\E /xm, like => { @@ -2501,12 +2283,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog \n\s+\QSELECT test_table.col1\E \n\s+\QFROM dump_test.test_table\E \n\s+\QWITH LOCAL CHECK OPTION;\E/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, 'ALTER VIEW test_view SET DEFAULT' => { create_order => 62, @@ -2514,12 +2293,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog 'ALTER VIEW dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;', regexp => qr/^ \QALTER TABLE ONLY dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;\E/xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, - unlike => { - exclude_dump_test_schema => 1, }, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, + unlike => { exclude_dump_test_schema => 1, }, }, # FIXME 'DROP SCHEMA public (for testing without public schema)' => { @@ -2530,101 +2306,106 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog like => {}, }, 'DROP SCHEMA public' => { - regexp => qr/^DROP SCHEMA public;/m, + regexp => qr/^DROP SCHEMA public;/m, + # this shouldn't ever get emitted anymore - like => {}, }, + like => {}, }, 'DROP SCHEMA IF EXISTS public' => { - regexp => qr/^DROP SCHEMA IF EXISTS public;/m, + regexp => qr/^DROP SCHEMA IF EXISTS public;/m, + # this shouldn't ever get emitted anymore - like => {}, }, + like => {}, }, 'DROP EXTENSION plpgsql' => { - regexp => qr/^DROP EXTENSION plpgsql;/m, + regexp => qr/^DROP EXTENSION plpgsql;/m, + # this shouldn't ever get emitted anymore - like => {}, }, + like => {}, }, 'DROP FUNCTION dump_test.pltestlang_call_handler()' => { regexp => qr/^DROP FUNCTION dump_test\.pltestlang_call_handler\(\);/m, like => { clean => 1, }, }, 'DROP LANGUAGE pltestlang' => { - regexp => qr/^DROP PROCEDURAL LANGUAGE pltestlang;/m, - like => { clean => 1, }, }, + regexp => qr/^DROP PROCEDURAL LANGUAGE pltestlang;/m, + like => { clean => 1, }, }, 'DROP SCHEMA dump_test' => { - regexp => qr/^DROP SCHEMA dump_test;/m, - like => { clean => 1, }, }, + regexp => qr/^DROP SCHEMA dump_test;/m, + like => { clean => 1, }, }, 'DROP SCHEMA dump_test_second_schema' => { - regexp => qr/^DROP SCHEMA dump_test_second_schema;/m, - like => { clean => 1, }, }, + regexp => qr/^DROP SCHEMA dump_test_second_schema;/m, + like => { clean => 1, }, }, 'DROP TABLE test_table' => { - regexp => qr/^DROP TABLE dump_test\.test_table;/m, - like => { clean => 1, }, }, + regexp => qr/^DROP TABLE dump_test\.test_table;/m, + like => { clean => 1, }, }, 'DROP TABLE fk_reference_test_table' => { - regexp => qr/^DROP TABLE dump_test\.fk_reference_test_table;/m, - like => { clean => 1, }, }, + regexp => qr/^DROP TABLE dump_test\.fk_reference_test_table;/m, + like => { clean => 1, }, }, 'DROP TABLE test_second_table' => { - regexp => qr/^DROP TABLE dump_test\.test_second_table;/m, - like => { clean => 1, }, }, + regexp => qr/^DROP TABLE dump_test\.test_second_table;/m, + like => { clean => 1, }, }, 'DROP TABLE 
test_third_table' => { regexp => qr/^DROP TABLE dump_test_second_schema\.test_third_table;/m, like => { clean => 1, }, }, 'DROP EXTENSION IF EXISTS plpgsql' => { - regexp => qr/^DROP EXTENSION IF EXISTS plpgsql;/m, + regexp => qr/^DROP EXTENSION IF EXISTS plpgsql;/m, + # this shouldn't ever get emitted anymore - like => {}, }, + like => {}, }, 'DROP FUNCTION IF EXISTS dump_test.pltestlang_call_handler()' => { - regexp => qr/^ + regexp => qr/^ \QDROP FUNCTION IF EXISTS dump_test.pltestlang_call_handler();\E /xm, - like => { clean_if_exists => 1, }, }, + like => { clean_if_exists => 1, }, }, 'DROP LANGUAGE IF EXISTS pltestlang' => { - regexp => qr/^DROP PROCEDURAL LANGUAGE IF EXISTS pltestlang;/m, - like => { clean_if_exists => 1, }, }, + regexp => qr/^DROP PROCEDURAL LANGUAGE IF EXISTS pltestlang;/m, + like => { clean_if_exists => 1, }, }, 'DROP SCHEMA IF EXISTS dump_test' => { - regexp => qr/^DROP SCHEMA IF EXISTS dump_test;/m, - like => { clean_if_exists => 1, }, }, + regexp => qr/^DROP SCHEMA IF EXISTS dump_test;/m, + like => { clean_if_exists => 1, }, }, 'DROP SCHEMA IF EXISTS dump_test_second_schema' => { - regexp => qr/^DROP SCHEMA IF EXISTS dump_test_second_schema;/m, - like => { clean_if_exists => 1, }, }, + regexp => qr/^DROP SCHEMA IF EXISTS dump_test_second_schema;/m, + like => { clean_if_exists => 1, }, }, 'DROP TABLE IF EXISTS test_table' => { - regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_table;/m, - like => { clean_if_exists => 1, }, }, + regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_table;/m, + like => { clean_if_exists => 1, }, }, 'DROP TABLE IF EXISTS test_second_table' => { - regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_second_table;/m, - like => { clean_if_exists => 1, }, }, + regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_second_table;/m, + like => { clean_if_exists => 1, }, }, 'DROP TABLE IF EXISTS test_third_table' => { - regexp => qr/^ + regexp => qr/^ \QDROP TABLE IF EXISTS dump_test_second_schema.test_third_table;\E /xm, - like => { clean_if_exists => 1, }, }, + like => { clean_if_exists => 1, }, }, 'DROP ROLE regress_dump_test_role' => { - regexp => qr/^ + regexp => qr/^ \QDROP ROLE regress_dump_test_role;\E /xm, - like => { pg_dumpall_globals_clean => 1, }, }, + like => { pg_dumpall_globals_clean => 1, }, }, 'DROP ROLE pg_' => { - regexp => qr/^ + regexp => qr/^ \QDROP ROLE pg_\E.*; /xm, + # this shouldn't ever get emitted anywhere - like => {}, }, + like => {}, }, 'GRANT USAGE ON SCHEMA dump_test_second_schema' => { create_order => 10, @@ -2635,10 +2416,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog /xm, like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT USAGE ON FOREIGN DATA WRAPPER dummy' => { create_order => 85, @@ -2647,11 +2427,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QGRANT ALL ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT USAGE ON FOREIGN SERVER s1' => { create_order => 85, @@ -2660,11 +2437,8 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QGRANT ALL ON FOREIGN SERVER s1 TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - section_pre_data => 1, }, 
- unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT USAGE ON DOMAIN dump_test.us_postal_code' => { create_order => 72, @@ -2673,13 +2447,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QGRANT ALL ON TYPE dump_test.us_postal_code TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'GRANT USAGE ON TYPE dump_test.int42' => { create_order => 87, @@ -2688,13 +2460,11 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QGRANT ALL ON TYPE dump_test.int42 TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'GRANT USAGE ON TYPE dump_test.planets - ENUM' => { create_order => 66, @@ -2703,27 +2473,24 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QGRANT ALL ON TYPE dump_test.planets TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'GRANT USAGE ON TYPE dump_test.textrange - RANGE' => { create_order => 67, - create_sql => 'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;', + create_sql => +'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;', regexp => qr/^ \QGRANT ALL ON TYPE dump_test.textrange TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'GRANT CREATE ON DATABASE dump_test' => { create_order => 48, @@ -2732,23 +2499,23 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog regexp => qr/^ \QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E /xm, - like => { pg_dumpall_dbprivs => 1, }, }, + like => { pg_dumpall_dbprivs => 1, }, }, 'GRANT SELECT ON TABLE test_table' => { create_order => 5, create_sql => 'GRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;', regexp => - qr/^GRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;/m, +qr/^GRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;/m, like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - section_pre_data => 1, }, + only_dump_test_table => 1, + section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - exclude_test_table => 1, - no_privs => 1, }, }, + exclude_test_table => 1, + no_privs => 1, }, }, 'GRANT SELECT ON TABLE test_third_table' => { create_order => 19, @@ -2759,10 +2526,9 @@ qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_dump_test_role;/m, like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + 
role => 1, + section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT ALL ON SEQUENCE test_third_table_col1_seq' => { create_order => 28, @@ -2774,10 +2540,9 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du /xm, like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT SELECT ON TABLE measurement' => { create_order => 91, @@ -2785,27 +2550,25 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du TABLE dump_test.measurement TO regress_dump_test_role;', regexp => - qr/^GRANT SELECT ON TABLE dump_test.measurement TO regress_dump_test_role;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, +qr/^GRANT SELECT ON TABLE dump_test.measurement TO regress_dump_test_role;/m, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'GRANT SELECT ON TABLE measurement_y2006m2' => { create_order => 92, create_sql => 'GRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;', - regexp => qr/^GRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;/m, + regexp => +qr/^GRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;/m, like => { %full_runs, - role => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + role => 1, + section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT ALL ON LARGE OBJECT ...' => { create_order => 60, @@ -2821,15 +2584,15 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du /xm, like => { %full_runs, - column_inserts => 1, - data_only => 1, - section_pre_data => 1, - test_schema_plus_blobs => 1, }, + column_inserts => 1, + data_only => 1, + section_pre_data => 1, + test_schema_plus_blobs => 1, }, unlike => { binary_upgrade => 1, - no_blobs => 1, - no_privs => 1, - schema_only => 1, }, }, + no_blobs => 1, + no_privs => 1, + schema_only => 1, }, }, 'GRANT INSERT(col1) ON TABLE test_second_table' => { create_order => 8, @@ -2839,13 +2602,11 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du regexp => qr/^ \QGRANT INSERT(col1) ON TABLE dump_test.test_second_table TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - %dump_test_schema_runs, - section_pre_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_pre_data => 1, }, unlike => { exclude_dump_test_schema => 1, - no_privs => 1, }, }, + no_privs => 1, }, }, 'GRANT EXECUTE ON FUNCTION pg_sleep() TO regress_dump_test_role' => { create_order => 16, @@ -2854,11 +2615,8 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du regexp => qr/^ \QGRANT ALL ON FUNCTION pg_catalog.pg_sleep(double precision) TO regress_dump_test_role;\E /xm, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT SELECT (proname ...) 
ON TABLE pg_proc TO public' => { create_order => 46, @@ -2925,59 +2683,53 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du \QGRANT SELECT(probin) ON TABLE pg_catalog.pg_proc TO PUBLIC;\E\n.* \QGRANT SELECT(proconfig) ON TABLE pg_catalog.pg_proc TO PUBLIC;\E\n.* \QGRANT SELECT(proacl) ON TABLE pg_catalog.pg_proc TO PUBLIC;\E/xms, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'GRANT USAGE ON SCHEMA public TO public' => { regexp => qr/^ \Q--\E\n\n \QGRANT USAGE ON SCHEMA public TO PUBLIC;\E /xm, + # this shouldn't ever get emitted anymore like => {}, }, 'REFRESH MATERIALIZED VIEW matview' => { - regexp => qr/^REFRESH MATERIALIZED VIEW dump_test.matview;/m, - like => { - %full_runs, - %dump_test_schema_runs, - section_post_data => 1, }, + regexp => qr/^REFRESH MATERIALIZED VIEW dump_test.matview;/m, + like => + { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, 'REFRESH MATERIALIZED VIEW matview_second' => { - regexp => qr/^ + regexp => qr/^ \QREFRESH MATERIALIZED VIEW dump_test.matview;\E \n.* \QREFRESH MATERIALIZED VIEW dump_test.matview_second;\E /xms, - like => { - %full_runs, - %dump_test_schema_runs, - section_post_data => 1, }, + like => + { %full_runs, %dump_test_schema_runs, section_post_data => 1, }, unlike => { - binary_upgrade => 1, + binary_upgrade => 1, exclude_dump_test_schema => 1, - schema_only => 1, }, }, + schema_only => 1, }, }, # FIXME 'REFRESH MATERIALIZED VIEW matview_third' => { - regexp => qr/^ + regexp => qr/^ \QREFRESH MATERIALIZED VIEW dump_test.matview_third;\E /xms, - like => {}, }, + like => {}, }, # FIXME 'REFRESH MATERIALIZED VIEW matview_fourth' => { - regexp => qr/^ + regexp => qr/^ \QREFRESH MATERIALIZED VIEW dump_test.matview_fourth;\E /xms, - like => {}, }, + like => {}, }, 'REVOKE CONNECT ON DATABASE dump_test FROM public' => { create_order => 49, @@ -2987,7 +2739,7 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du \QGRANT TEMPORARY ON DATABASE dump_test TO PUBLIC;\E\n \QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E /xm, - like => { pg_dumpall_dbprivs => 1, }, }, + like => { pg_dumpall_dbprivs => 1, }, }, 'REVOKE EXECUTE ON FUNCTION pg_sleep() FROM public' => { create_order => 15, @@ -2996,21 +2748,16 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du regexp => qr/^ \QREVOKE ALL ON FUNCTION pg_catalog.pg_sleep(double precision) FROM PUBLIC;\E /xm, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'REVOKE SELECT ON TABLE pg_proc FROM public' => { create_order => 45, create_sql => 'REVOKE SELECT ON TABLE pg_proc FROM public;', - regexp => qr/^REVOKE SELECT ON TABLE pg_catalog.pg_proc FROM PUBLIC;/m, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + regexp => + qr/^REVOKE SELECT ON TABLE pg_catalog.pg_proc FROM PUBLIC;/m, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'REVOKE CREATE ON SCHEMA public FROM public' => { create_order => 16, @@ -3019,11 +2766,8 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du \QREVOKE ALL ON 
SCHEMA public FROM PUBLIC;\E \n\QGRANT USAGE ON SCHEMA public TO PUBLIC;\E /xm, - like => { - %full_runs, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + like => { %full_runs, section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, 'REVOKE USAGE ON LANGUAGE plpgsql FROM public' => { create_order => 16, @@ -3032,11 +2776,10 @@ qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_du like => { %full_runs, %dump_test_schema_runs, - only_dump_test_table => 1, - role => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + only_dump_test_table => 1, + role => 1, + section_pre_data => 1, }, + unlike => { no_privs => 1, }, }, ); @@ -3121,8 +2864,9 @@ foreach my $run (sort keys %pgdump_runs) next; } - # If there is a like entry, but no unlike entry, then we will test the like case - if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key})) +# If there is a like entry, but no unlike entry, then we will test the like case + if ($tests{$test}->{like}->{$test_key} + && !defined($tests{$test}->{unlike}->{$test_key})) { $num_tests++; } @@ -3283,19 +3027,19 @@ foreach my $run (sort keys %pgdump_runs) # Run the test listed as a like, unless it is specifically noted # as an unlike (generally due to an explicit exclusion or similar). - if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key})) + if ($tests{$test}->{like}->{$test_key} + && !defined($tests{$test}->{unlike}->{$test_key})) { - if (!ok($output_file =~ $tests{$test}->{regexp}, "$run: should dump $test")) + if (!ok($output_file =~ $tests{$test}->{regexp}, + "$run: should dump $test")) { diag("Review $run results in $tempdir"); } } else { - if (!ok( - $output_file !~ - $tests{$test}->{regexp}, - "$run: should not dump $test")) + if (!ok($output_file !~ $tests{$test}->{regexp}, + "$run: should not dump $test")) { diag("Review $run results in $tempdir"); } diff --git a/src/bin/pg_resetwal/t/001_basic.pl b/src/bin/pg_resetwal/t/001_basic.pl index 0d6ab20073..ca93ddbda0 100644 --- a/src/bin/pg_resetwal/t/001_basic.pl +++ b/src/bin/pg_resetwal/t/001_basic.pl @@ -13,14 +13,14 @@ my $node = get_new_node('main'); $node->init; command_like([ 'pg_resetwal', '-n', $node->data_dir ], - qr/checkpoint/, - 'pg_resetwal -n produces output'); + qr/checkpoint/, 'pg_resetwal -n produces output'); # Permissions on PGDATA should be default SKIP: { - skip "unix-style permissions not supported on Windows", 1 if ($windows_os); + skip "unix-style permissions not supported on Windows", 1 + if ($windows_os); ok(check_mode_recursive($node->data_dir, 0700, 0600), 'check PGDATA permissions'); diff --git a/src/bin/pg_resetwal/t/002_corrupted.pl b/src/bin/pg_resetwal/t/002_corrupted.pl index 9df5574c8b..54bdbfd661 100644 --- a/src/bin/pg_resetwal/t/002_corrupted.pl +++ b/src/bin/pg_resetwal/t/002_corrupted.pl @@ -11,7 +11,7 @@ my $node = get_new_node('main'); $node->init; my $pg_control = $node->data_dir . '/global/pg_control'; -my $size = (stat($pg_control))[7]; +my $size = (stat($pg_control))[7]; # Read out the head of the file to get PG_CONTROL_VERSION in # particular. 
@@ -27,11 +27,14 @@ binmode $fh; print $fh pack("x[$size]"); close $fh; -command_checks_all([ 'pg_resetwal', '-n', $node->data_dir ], - 0, - [ qr/pg_control version number/ ], - [ qr/pg_resetwal: pg_control exists but is broken or wrong version; ignoring it/ ], - 'processes corrupted pg_control all zeroes'); +command_checks_all( + [ 'pg_resetwal', '-n', $node->data_dir ], + 0, + [qr/pg_control version number/], + [ +qr/pg_resetwal: pg_control exists but is broken or wrong version; ignoring it/ + ], + 'processes corrupted pg_control all zeroes'); # Put in the previously saved header data. This uses a different code # path internally, allowing us to process a zero WAL segment size. @@ -40,8 +43,11 @@ binmode $fh; print $fh $data, pack("x[" . ($size - 16) . "]"); close $fh; -command_checks_all([ 'pg_resetwal', '-n', $node->data_dir ], - 0, - [ qr/pg_control version number/ ], - [ qr/\Qpg_resetwal: pg_control specifies invalid WAL segment size (0 bytes); proceed with caution\E/ ], - 'processes zero WAL segment size'); +command_checks_all( + [ 'pg_resetwal', '-n', $node->data_dir ], + 0, + [qr/pg_control version number/], + [ +qr/\Qpg_resetwal: pg_control specifies invalid WAL segment size (0 bytes); proceed with caution\E/ + ], + 'processes zero WAL segment size'); diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm index 63d9bd517d..278ffd891c 100644 --- a/src/bin/pg_rewind/RewindTest.pm +++ b/src/bin/pg_rewind/RewindTest.pm @@ -115,16 +115,18 @@ sub check_query sub setup_cluster { - my $extra_name = shift; # Used to differentiate clusters - my $extra = shift; # Extra params for initdb + my $extra_name = shift; # Used to differentiate clusters + my $extra = shift; # Extra params for initdb # Initialize master, data checksums are mandatory - $node_master = get_new_node('master' . ($extra_name ? "_${extra_name}" : '')); - $node_master->init( - allows_streaming => 1, extra => $extra); + $node_master = + get_new_node('master' . ($extra_name ? "_${extra_name}" : '')); + $node_master->init(allows_streaming => 1, extra => $extra); + # Set wal_keep_segments to prevent WAL segment recycling after enforced # checkpoints in the tests. - $node_master->append_conf('postgresql.conf', qq( + $node_master->append_conf( + 'postgresql.conf', qq( wal_keep_segments = 20 )); } @@ -141,7 +143,8 @@ sub create_standby { my $extra_name = shift; - $node_standby = get_new_node('standby' . ($extra_name ? "_${extra_name}" : '')); + $node_standby = + get_new_node('standby' . ($extra_name ? "_${extra_name}" : '')); $node_master->backup('my_backup'); $node_standby->init_from_backup($node_master, 'my_backup'); my $connstr_master = $node_master->connstr(); @@ -239,10 +242,11 @@ sub run_pg_rewind "$tmp_folder/master-postgresql.conf.tmp", "$master_pgdata/postgresql.conf"); - chmod($node_master->group_access() ? 0640 : 0600, - "$master_pgdata/postgresql.conf") - or BAIL_OUT( - "unable to set permissions for $master_pgdata/postgresql.conf"); + chmod( + $node_master->group_access() ? 
0640 : 0600, + "$master_pgdata/postgresql.conf") + or BAIL_OUT( + "unable to set permissions for $master_pgdata/postgresql.conf"); # Plug-in rewound node to the now-promoted standby node my $port_standby = $node_standby->port; diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl index 1b0f823b0c..9a0ce09a77 100644 --- a/src/bin/pg_rewind/t/001_basic.pl +++ b/src/bin/pg_rewind/t/001_basic.pl @@ -87,9 +87,10 @@ in master, before promotion 'tail-copy'); # Permissions on PGDATA should be default - SKIP: + SKIP: { - skip "unix-style permissions not supported on Windows", 1 if ($windows_os); + skip "unix-style permissions not supported on Windows", 1 + if ($windows_os); ok(check_mode_recursive($node_master->data_dir(), 0700, 0600), 'check PGDATA permissions'); diff --git a/src/bin/pg_rewind/t/002_databases.pl b/src/bin/pg_rewind/t/002_databases.pl index c364965d3a..bef0e173dc 100644 --- a/src/bin/pg_rewind/t/002_databases.pl +++ b/src/bin/pg_rewind/t/002_databases.pl @@ -43,9 +43,10 @@ template1 'database names'); # Permissions on PGDATA should have group permissions - SKIP: + SKIP: { - skip "unix-style permissions not supported on Windows", 1 if ($windows_os); + skip "unix-style permissions not supported on Windows", 1 + if ($windows_os); ok(check_mode_recursive($node_master->data_dir(), 0750, 0640), 'check PGDATA permissions'); diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl index be08b20611..b346d245ac 100644 --- a/src/bin/pgbench/t/001_pgbench_with_server.pl +++ b/src/bin/pgbench/t/001_pgbench_with_server.pl @@ -81,8 +81,10 @@ pgbench( # Initialize pgbench tables scale 1 pgbench( - '-i', 0, [qr{^$}], - [ qr{creating tables}, qr{vacuuming}, qr{creating primary keys}, qr{done\.} ], + '-i', 0, + [qr{^$}], + [ qr{creating tables}, qr{vacuuming}, + qr{creating primary keys}, qr{done\.} ], 'pgbench scale 1 initialization',); # Again, with all possible options @@ -100,8 +102,9 @@ pgbench( # Test interaction of --init-steps with legacy step-selection options pgbench( - '--initialize --init-steps=dtpvgvv --no-vacuum --foreign-keys --unlogged-tables', - 0, [qr{^$}], +'--initialize --init-steps=dtpvgvv --no-vacuum --foreign-keys --unlogged-tables', + 0, + [qr{^$}], [ qr{dropping old tables}, qr{creating tables}, qr{creating primary keys}, @@ -218,15 +221,16 @@ COMMIT; # test expressions # command 1..3 and 23 depend on random seed which is used to call srandom. pgbench( - '--random-seed=5432 -t 1 -Dfoo=-10.1 -Dbla=false -Di=+3 -Dminint=-9223372036854775808 -Dn=null -Dt=t -Df=of -Dd=1.0', +'--random-seed=5432 -t 1 -Dfoo=-10.1 -Dbla=false -Di=+3 -Dminint=-9223372036854775808 -Dn=null -Dt=t -Df=of -Dd=1.0', 0, [ qr{type: .*/001_pgbench_expressions}, qr{processed: 1/1} ], [ qr{setting random seed to 5432\b}, + # After explicit seeding, the four * random checks (1-3,20) should be # deterministic, but not necessarily portable. 
- qr{command=1.: int 1\d\b}, # uniform random: 12 on linux - qr{command=2.: int 1\d\d\b}, # exponential random: 106 on linux - qr{command=3.: int 1\d\d\d\b}, # gaussian random: 1462 on linux + qr{command=1.: int 1\d\b}, # uniform random: 12 on linux + qr{command=2.: int 1\d\d\b}, # exponential random: 106 on linux + qr{command=3.: int 1\d\d\d\b}, # gaussian random: 1462 on linux qr{command=4.: int 4\b}, qr{command=5.: int 5\b}, qr{command=6.: int 6\b}, @@ -240,7 +244,7 @@ pgbench( qr{command=16.: double 16\b}, qr{command=17.: double 17\b}, qr{command=18.: int 9223372036854775807\b}, - qr{command=20.: int \d\b}, # zipfian random: 1 on linux + qr{command=20.: int \d\b}, # zipfian random: 1 on linux qr{command=21.: double -27\b}, qr{command=22.: double 1024\b}, qr{command=23.: double 1\b}, @@ -280,9 +284,9 @@ pgbench( qr{command=86.: int 86\b}, qr{command=93.: int 93\b}, qr{command=95.: int 0\b}, - qr{command=96.: int 1\b}, # :scale - qr{command=97.: int 0\b}, # :client_id - qr{command=98.: int 5432\b}, # :random_seed + qr{command=96.: int 1\b}, # :scale + qr{command=97.: int 0\b}, # :client_id + qr{command=98.: int 5432\b}, # :random_seed ], 'pgbench expressions', { '001_pgbench_expressions' => q{-- integer functions @@ -411,18 +415,20 @@ SELECT :v0, :v1, :v2, :v3; # random determinism when seeded $node->safe_psql('postgres', - 'CREATE UNLOGGED TABLE seeded_random(seed INT8 NOT NULL, rand TEXT NOT NULL, val INTEGER NOT NULL);'); +'CREATE UNLOGGED TABLE seeded_random(seed INT8 NOT NULL, rand TEXT NOT NULL, val INTEGER NOT NULL);' +); # same value to check for determinism my $seed = int(rand(1000000000)); for my $i (1, 2) { - pgbench("--random-seed=$seed -t 1", - 0, - [qr{processed: 1/1}], - [qr{setting random seed to $seed\b}], - "random seeded with $seed", - { "001_pgbench_random_seed_$i" => q{-- test random functions + pgbench( + "--random-seed=$seed -t 1", + 0, + [qr{processed: 1/1}], + [qr{setting random seed to $seed\b}], + "random seeded with $seed", + { "001_pgbench_random_seed_$i" => q{-- test random functions \set ur random(1000, 1999) \set er random_exponential(2000, 2999, 2.0) \set gr random_gaussian(3000, 3999, 3.0) @@ -436,16 +442,20 @@ INSERT INTO seeded_random(seed, rand, val) VALUES } # check that all runs generated the same 4 values -my ($ret, $out, $err) = - $node->psql('postgres', - 'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'); +my ($ret, $out, $err) = $node->psql('postgres', +'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val' +); -ok($ret == 0, "psql seeded_random count ok"); +ok($ret == 0, "psql seeded_random count ok"); ok($err eq '', "psql seeded_random count stderr is empty"); -ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/, "psql seeded_random count uniform"); -ok($out =~ /\b$seed\|exponential\|2\d\d\d\|2/, "psql seeded_random count exponential"); -ok($out =~ /\b$seed\|gaussian\|3\d\d\d\|2/, "psql seeded_random count gaussian"); -ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/, "psql seeded_random count zipfian"); +ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/, + "psql seeded_random count uniform"); +ok( $out =~ /\b$seed\|exponential\|2\d\d\d\|2/, + "psql seeded_random count exponential"); +ok( $out =~ /\b$seed\|gaussian\|3\d\d\d\|2/, + "psql seeded_random count gaussian"); +ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/, + "psql seeded_random count zipfian"); $node->safe_psql('postgres', 'DROP TABLE seeded_random;'); @@ -481,8 +491,8 @@ my @errors = ( # SQL [ 'sql syntax error', 0, - [ qr{ERROR: syntax error}, qr{prepared 
statement .* does not exist} - ], + [ qr{ERROR: syntax error}, + qr{prepared statement .* does not exist} ], q{-- SQL syntax error SELECT 1 + ; } ], @@ -493,7 +503,7 @@ SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i); } ], # SHELL - [ 'shell bad command', 0, + [ 'shell bad command', 0, [qr{\(shell\) .* meta-command failed}], q{\shell no-such-command} ], [ 'shell undefined variable', 0, [qr{undefined variable ":nosuchvariable"}], @@ -557,52 +567,34 @@ SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i); 0, [qr{exponential parameter must be greater }], q{\set i random_exponential(0, 10, 0.0)} ], - [ 'set zipfian param to 1', + [ 'set zipfian param to 1', 0, [qr{zipfian parameter must be in range \(0, 1\) U \(1, \d+\]}], q{\set i random_zipfian(0, 10, 1)} ], - [ 'set zipfian param too large', + [ 'set zipfian param too large', 0, [qr{zipfian parameter must be in range \(0, 1\) U \(1, \d+\]}], q{\set i random_zipfian(0, 10, 1000000)} ], [ 'set non numeric value', 0, [qr{malformed variable "foo" value: "bla"}], q{\set i :foo + 1} ], - [ 'set no expression', - 1, - [qr{syntax error}], - q{\set i} ], - [ 'set missing argument', + [ 'set no expression', 1, [qr{syntax error}], q{\set i} ], + [ 'set missing argument', 1, [qr{missing argument}i], q{\set} ], + [ 'set not a bool', 0, + [qr{cannot coerce double to boolean}], q{\set b NOT 0.0} ], + [ 'set not an int', 0, + [qr{cannot coerce boolean to int}], q{\set i TRUE + 2} ], + [ 'set not a double', 0, + [qr{cannot coerce boolean to double}], q{\set d ln(TRUE)} ], + [ 'set case error', 1, - [qr{missing argument}i], - q{\set} ], - [ 'set not a bool', - 0, - [ qr{cannot coerce double to boolean} ], - q{\set b NOT 0.0} ], - [ 'set not an int', - 0, - [ qr{cannot coerce boolean to int} ], - q{\set i TRUE + 2} ], - [ 'set not a double', - 0, - [ qr{cannot coerce boolean to double} ], - q{\set d ln(TRUE)} ], - [ 'set case error', - 1, - [ qr{syntax error in command "set"} ], + [qr{syntax error in command "set"}], q{\set i CASE TRUE THEN 1 ELSE 0 END} ], - [ 'set random error', - 0, - [ qr{cannot coerce boolean to int} ], - q{\set b random(FALSE, TRUE)} ], - [ 'set number of args mismatch', - 1, - [ qr{unexpected number of arguments} ], - q{\set d ln(1.0, 2.0))} ], - [ 'set at least one arg', - 1, - [ qr{at least one argument expected} ], - q{\set i greatest())} ], + [ 'set random error', 0, + [qr{cannot coerce boolean to int}], q{\set b random(FALSE, TRUE)} ], + [ 'set number of args mismatch', 1, + [qr{unexpected number of arguments}], q{\set d ln(1.0, 2.0))} ], + [ 'set at least one arg', 1, + [qr{at least one argument expected}], q{\set i greatest())} ], # SETSHELL [ 'setshell not an int', 0, @@ -625,8 +617,8 @@ SELECT LEAST(:i, :i, :i, :i, :i, :i, :i, :i, :i, :i, :i); [ 'misc invalid backslash command', 1, [qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand} ], [ 'misc empty script', 1, [qr{empty command list for script}], q{} ], - [ 'bad boolean', 0, [qr{malformed variable.*trueXXX}], q{\set b :badtrue or true} ], - ); + [ 'bad boolean', 0, + [qr{malformed variable.*trueXXX}], q{\set b :badtrue or true} ],); for my $e (@errors) @@ -635,7 +627,7 @@ for my $e (@errors) my $n = '001_pgbench_error_' . $name; $n =~ s/ /_/g; pgbench( - '-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared', +'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared', $status, [ $status ? 
qr{^$} : qr{processed: 0/1} ], $re, @@ -647,7 +639,7 @@ for my $e (@errors) pgbench( '-t 1', 0, [ qr{processed: 1/1}, qr{zipfian cache array overflowed 1 time\(s\)} ], - [ qr{^} ], + [qr{^}], 'pgbench zipfian array overflow on random_zipfian', { '001_pgbench_random_zipfian' => q{ \set i random_zipfian(1, 100, 0.5) diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl index af21f04649..24714960ba 100644 --- a/src/bin/pgbench/t/002_pgbench_no_server.pl +++ b/src/bin/pgbench/t/002_pgbench_no_server.pl @@ -15,8 +15,7 @@ $testname =~ s/\.pl$//; my $testdir = "$TestLib::tmp_check/t_${testname}_stuff"; mkdir $testdir - or - BAIL_OUT("could not create test directory \"${testdir}\": $!"); + or BAIL_OUT("could not create test directory \"${testdir}\": $!"); # invoke pgbench sub pgbench @@ -38,8 +37,10 @@ sub pgbench_scripts for my $fn (sort keys %$files) { my $filename = $testdir . '/' . $fn; + # cleanup file weight if any $filename =~ s/\@\d+$//; + # cleanup from prior runs unlink $filename; append_to_file($filename, $$files{$fn}); @@ -105,14 +106,17 @@ my @options = ( [ 'ambiguous builtin', '-b s', [qr{ambiguous}] ], [ '--progress-timestamp => --progress', '--progress-timestamp', [qr{allowed only under}] ], - [ '-I without init option', '-I dtg', + [ '-I without init option', + '-I dtg', [qr{cannot be used in benchmarking mode}] ], - [ 'invalid init step', '-i -I dta', - [qr{unrecognized initialization step}, - qr{allowed steps are} ] ], - [ 'bad random seed', '--random-seed=one', - [qr{unrecognized random seed option "one": expecting an unsigned integer, "time" or "rand"}, - qr{error while setting random seed from --random-seed option} ] ], + [ 'invalid init step', + '-i -I dta', + [ qr{unrecognized initialization step}, qr{allowed steps are} ] ], + [ 'bad random seed', + '--random-seed=one', + [ +qr{unrecognized random seed option "one": expecting an unsigned integer, "time" or "rand"}, + qr{error while setting random seed from --random-seed option} ] ], # loging sub-options [ 'sampling => log', '--sampling-rate=0.01', @@ -161,23 +165,44 @@ pgbench( 'pgbench builtin list'); my @script_tests = ( + # name, err, { file => contents } - [ 'missing endif', [qr{\\if without matching \\endif}], {'if-noendif.sql' => '\if 1'} ], - [ 'missing if on elif', [qr{\\elif without matching \\if}], {'elif-noif.sql' => '\elif 1'} ], - [ 'missing if on else', [qr{\\else without matching \\if}], {'else-noif.sql' => '\else'} ], - [ 'missing if on endif', [qr{\\endif without matching \\if}], {'endif-noif.sql' => '\endif'} ], - [ 'elif after else', [qr{\\elif after \\else}], {'else-elif.sql' => "\\if 1\n\\else\n\\elif 0\n\\endif"} ], - [ 'else after else', [qr{\\else after \\else}], {'else-else.sql' => "\\if 1\n\\else\n\\else\n\\endif"} ], - [ 'if syntax error', [qr{syntax error in command "if"}], {'if-bad.sql' => "\\if\n\\endif\n"} ], - [ 'elif syntax error', [qr{syntax error in command "elif"}], {'elif-bad.sql' => "\\if 0\n\\elif +\n\\endif\n"} ], - [ 'else syntax error', [qr{unexpected argument in command "else"}], {'else-bad.sql' => "\\if 0\n\\else BAD\n\\endif\n"} ], - [ 'endif syntax error', [qr{unexpected argument in command "endif"}], {'endif-bad.sql' => "\\if 0\n\\endif BAD\n"} ], -); + [ 'missing endif', + [qr{\\if without matching \\endif}], + { 'if-noendif.sql' => '\if 1' } ], + [ 'missing if on elif', + [qr{\\elif without matching \\if}], + { 'elif-noif.sql' => '\elif 1' } ], + [ 'missing if on else', + [qr{\\else without matching \\if}], + { 'else-noif.sql' => 
'\else' } ], + [ 'missing if on endif', + [qr{\\endif without matching \\if}], + { 'endif-noif.sql' => '\endif' } ], + [ 'elif after else', + [qr{\\elif after \\else}], + { 'else-elif.sql' => "\\if 1\n\\else\n\\elif 0\n\\endif" } ], + [ 'else after else', + [qr{\\else after \\else}], + { 'else-else.sql' => "\\if 1\n\\else\n\\else\n\\endif" } ], + [ 'if syntax error', + [qr{syntax error in command "if"}], + { 'if-bad.sql' => "\\if\n\\endif\n" } ], + [ 'elif syntax error', + [qr{syntax error in command "elif"}], + { 'elif-bad.sql' => "\\if 0\n\\elif +\n\\endif\n" } ], + [ 'else syntax error', + [qr{unexpected argument in command "else"}], + { 'else-bad.sql' => "\\if 0\n\\else BAD\n\\endif\n" } ], + [ 'endif syntax error', + [qr{unexpected argument in command "endif"}], + { 'endif-bad.sql' => "\\if 0\n\\endif BAD\n" } ],); for my $t (@script_tests) { my ($name, $err, $files) = @$t; - pgbench_scripts('', 1, [qr{^$}], $err, 'pgbench option error: ' . $name, $files); + pgbench_scripts('', 1, [qr{^$}], $err, 'pgbench option error: ' . $name, + $files); } done_testing(); diff --git a/src/bin/scripts/t/080_pg_isready.pl b/src/bin/scripts/t/080_pg_isready.pl index d01804da37..6da89e1b04 100644 --- a/src/bin/scripts/t/080_pg_isready.pl +++ b/src/bin/scripts/t/080_pg_isready.pl @@ -16,4 +16,5 @@ $node->init; $node->start; # use a long timeout for the benefit of very slow buildfarm machines -$node->command_ok([qw(pg_isready --timeout=60)], 'succeeds with server running'); +$node->command_ok([qw(pg_isready --timeout=60)], + 'succeeds with server running'); diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl index 382210e3b6..4c477a27aa 100644 --- a/src/bin/scripts/t/100_vacuumdb.pl +++ b/src/bin/scripts/t/100_vacuumdb.pl @@ -36,13 +36,17 @@ $node->issues_sql_like( $node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)], 'vacuumdb with connection string'); -$node->command_fails([qw(vacuumdb -Zt pg_am;ABORT postgres)], +$node->command_fails( + [qw(vacuumdb -Zt pg_am;ABORT postgres)], 'trailing command in "-t", without COLUMNS'); + # Unwanted; better if it failed. 
-$node->command_ok([qw(vacuumdb -Zt pg_am(amname);ABORT postgres)], +$node->command_ok( + [qw(vacuumdb -Zt pg_am(amname);ABORT postgres)], 'trailing command in "-t", with COLUMNS'); -$node->safe_psql('postgres', q| +$node->safe_psql( + 'postgres', q| CREATE TABLE "need""q(uot" (")x" text); CREATE FUNCTION f0(int) RETURNS int LANGUAGE SQL AS 'SELECT $1 * $1'; @@ -53,5 +57,6 @@ $node->safe_psql('postgres', q| |); $node->command_ok([qw|vacuumdb -Z --table="need""q(uot"(")x") postgres|], 'column list'); -$node->command_fails([qw|vacuumdb -Zt funcidx postgres|], +$node->command_fails( + [qw|vacuumdb -Zt funcidx postgres|], 'unqualifed name via functional index'); diff --git a/src/include/catalog/duplicate_oids b/src/include/catalog/duplicate_oids index 8c143cf06f..db9b370c2f 100755 --- a/src/include/catalog/duplicate_oids +++ b/src/include/catalog/duplicate_oids @@ -15,11 +15,11 @@ while (<>) next if /^CATALOG\(.*BKI_BOOTSTRAP/; next unless /\boid *=> *'(\d+)'/ - || /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+),/ - || /^CATALOG\([^,]*, *(\d+)/ - || /^DECLARE_INDEX\([^,]*, *(\d+)/ - || /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/ - || /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/; + || /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+),/ + || /^CATALOG\([^,]*, *(\d+)/ + || /^DECLARE_INDEX\([^,]*, *(\d+)/ + || /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/ + || /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/; $oidcounts{$1}++; $oidcounts{$2}++ if $2; } diff --git a/src/include/catalog/reformat_dat_file.pl b/src/include/catalog/reformat_dat_file.pl index 038ba7bb05..8ebbec6c54 100644 --- a/src/include/catalog/reformat_dat_file.pl +++ b/src/include/catalog/reformat_dat_file.pl @@ -73,7 +73,7 @@ foreach my $datfile (@input_files) my $header = "$1.h"; die "There in no header file corresponding to $datfile" - if ! -e $header; + if !-e $header; my $catalog = Catalog::ParseHeader($header); my $catname = $catalog->{catname}; @@ -186,7 +186,7 @@ sub strip_default_values { my $attname = $column->{name}; die "strip_default_values: $catname.$attname undefined\n" - if ! defined $row->{$attname}; + if !defined $row->{$attname}; # Delete values that match defaults. if (defined $column->{default} @@ -196,8 +196,9 @@ sub strip_default_values } # Also delete pg_proc.pronargs, since that can be recomputed. - if ($catname eq 'pg_proc' && $attname eq 'pronargs' && - defined($row->{proargtypes})) + if ( $catname eq 'pg_proc' + && $attname eq 'pronargs' + && defined($row->{proargtypes})) { delete $row->{$attname}; } @@ -210,7 +211,7 @@ sub strip_default_values # data files. sub format_hash { - my $data = shift; + my $data = shift; my @orig_attnames = @_; # Copy attname to new array if it has a value, so we can determine @@ -228,7 +229,7 @@ sub format_hash my $char_count = 1; my $threshold; - my $hash_str = ''; + my $hash_str = ''; my $element_count = 0; foreach my $attname (@attnames) @@ -262,7 +263,7 @@ sub format_hash # Include a leading space in the key-value pair, since this will # always go after either a comma or an additional padding space on # the next line. 
- my $element = " $attname => '$value'"; + my $element = " $attname => '$value'"; my $element_length = length($element); # If adding the element to the current line would expand the line diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl index f26460e627..ba90231989 100644 --- a/src/test/kerberos/t/001_auth.pl +++ b/src/test/kerberos/t/001_auth.pl @@ -17,12 +17,12 @@ my ($krb5_bin_dir, $krb5_sbin_dir); if ($^O eq 'darwin') { - $krb5_bin_dir = '/usr/local/opt/krb5/bin'; + $krb5_bin_dir = '/usr/local/opt/krb5/bin'; $krb5_sbin_dir = '/usr/local/opt/krb5/sbin'; } elsif ($^O eq 'freebsd') { - $krb5_bin_dir = '/usr/local/bin'; + $krb5_bin_dir = '/usr/local/bin'; $krb5_sbin_dir = '/usr/local/sbin'; } elsif ($^O eq 'linux') @@ -30,45 +30,48 @@ elsif ($^O eq 'linux') $krb5_sbin_dir = '/usr/sbin'; } -my $krb5_config = 'krb5-config'; -my $kinit = 'kinit'; -my $kdb5_util = 'kdb5_util'; +my $krb5_config = 'krb5-config'; +my $kinit = 'kinit'; +my $kdb5_util = 'kdb5_util'; my $kadmin_local = 'kadmin.local'; -my $krb5kdc = 'krb5kdc'; +my $krb5kdc = 'krb5kdc'; if ($krb5_bin_dir && -d $krb5_bin_dir) { $krb5_config = $krb5_bin_dir . '/' . $krb5_config; - $kinit = $krb5_bin_dir . '/' . $kinit; + $kinit = $krb5_bin_dir . '/' . $kinit; } if ($krb5_sbin_dir && -d $krb5_sbin_dir) { - $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; + $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; $kadmin_local = $krb5_sbin_dir . '/' . $kadmin_local; - $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; + $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; } my $realm = 'EXAMPLE.COM'; -my $krb5_conf = "${TestLib::tmp_check}/krb5.conf"; -my $kdc_conf = "${TestLib::tmp_check}/kdc.conf"; -my $krb5_log = "${TestLib::tmp_check}/krb5libs.log"; -my $kdc_log = "${TestLib::tmp_check}/krb5kdc.log"; -my $kdc_port = int(rand() * 16384) + 49152; +my $krb5_conf = "${TestLib::tmp_check}/krb5.conf"; +my $kdc_conf = "${TestLib::tmp_check}/kdc.conf"; +my $krb5_log = "${TestLib::tmp_check}/krb5libs.log"; +my $kdc_log = "${TestLib::tmp_check}/krb5kdc.log"; +my $kdc_port = int(rand() * 16384) + 49152; my $kdc_datadir = "${TestLib::tmp_check}/krb5kdc"; my $kdc_pidfile = "${TestLib::tmp_check}/krb5kdc.pid"; -my $keytab = "${TestLib::tmp_check}/krb5.keytab"; +my $keytab = "${TestLib::tmp_check}/krb5.keytab"; note "setting up Kerberos"; my ($stdout, $krb5_version); -run_log [ $krb5_config, '--version' ], '>', \$stdout or BAIL_OUT("could not execute krb5-config"); +run_log [ $krb5_config, '--version' ], '>', \$stdout + or BAIL_OUT("could not execute krb5-config"); BAIL_OUT("Heimdal is not supported") if $stdout =~ m/heimdal/; -$stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/ or BAIL_OUT("could not get Kerberos version"); +$stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/ + or BAIL_OUT("could not get Kerberos version"); $krb5_version = $1; -append_to_file($krb5_conf, -qq![logging] +append_to_file( + $krb5_conf, + qq![logging] default = FILE:$krb5_log kdc = FILE:$kdc_log @@ -80,27 +83,32 @@ $realm = { kdc = localhost:$kdc_port }!); -append_to_file($kdc_conf, -qq![kdcdefaults] +append_to_file( + $kdc_conf, + qq![kdcdefaults] !); + # For new-enough versions of krb5, use the _listen settings rather # than the _ports settings so that we can bind to localhost only. 
if ($krb5_version >= 1.15) { - append_to_file($kdc_conf, -qq!kdc_listen = localhost:$kdc_port + append_to_file( + $kdc_conf, + qq!kdc_listen = localhost:$kdc_port kdc_tcp_listen = localhost:$kdc_port !); } else { - append_to_file($kdc_conf, -qq!kdc_ports = $kdc_port + append_to_file( + $kdc_conf, + qq!kdc_ports = $kdc_port kdc_tcp_ports = $kdc_port !); } -append_to_file($kdc_conf, -qq! +append_to_file( + $kdc_conf, + qq! [realms] $realm = { database_name = $kdc_datadir/principal @@ -111,7 +119,7 @@ $realm = { mkdir $kdc_datadir or die; -$ENV{'KRB5_CONFIG'} = $krb5_conf; +$ENV{'KRB5_CONFIG'} = $krb5_conf; $ENV{'KRB5_KDC_PROFILE'} = $kdc_conf; my $service_principal = "$ENV{with_krb_srvnam}/localhost"; @@ -128,7 +136,7 @@ system_or_bail $krb5kdc, '-P', $kdc_pidfile; END { - kill 'INT', `cat $kdc_pidfile` if -f $kdc_pidfile; + kill 'INT', `cat $kdc_pidfile` if -f $kdc_pidfile; } note "setting up PostgreSQL instance"; @@ -148,9 +156,12 @@ sub test_access my ($node, $role, $expected_res, $test_name) = @_; # need to connect over TCP/IP for Kerberos - my $res = $node->psql('postgres', 'SELECT 1', - extra_params => [ '-d', $node->connstr('postgres').' host=localhost', - '-U', $role ]); + my $res = $node->psql( + 'postgres', + 'SELECT 1', + extra_params => [ + '-d', $node->connstr('postgres') . ' host=localhost', + '-U', $role ]); is($res, $expected_res, $test_name); } @@ -171,7 +182,8 @@ test_access($node, 'test1', 0, 'succeeds with mapping'); truncate($node->data_dir . '/pg_ident.conf', 0); unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{host all all localhost gss include_realm=0}); +$node->append_conf('pg_hba.conf', + qq{host all all localhost gss include_realm=0}); $node->restart; test_access($node, 'test1', 0, 'succeeds with include_realm=0'); diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl index 3a71e05353..10680b7837 100644 --- a/src/test/ldap/t/001_auth.pl +++ b/src/test/ldap/t/001_auth.pl @@ -15,22 +15,22 @@ else my ($slapd, $ldap_bin_dir, $ldap_schema_dir); -$ldap_bin_dir = undef; # usually in PATH +$ldap_bin_dir = undef; # usually in PATH if ($^O eq 'darwin') { - $slapd = '/usr/local/opt/openldap/libexec/slapd'; + $slapd = '/usr/local/opt/openldap/libexec/slapd'; $ldap_schema_dir = '/usr/local/etc/openldap/schema'; } elsif ($^O eq 'linux') { - $slapd = '/usr/sbin/slapd'; + $slapd = '/usr/sbin/slapd'; $ldap_schema_dir = '/etc/ldap/schema' if -d '/etc/ldap/schema'; $ldap_schema_dir = '/etc/openldap/schema' if -d '/etc/openldap/schema'; } elsif ($^O eq 'freebsd') { - $slapd = '/usr/local/libexec/slapd'; + $slapd = '/usr/local/libexec/slapd'; $ldap_schema_dir = '/usr/local/etc/openldap/schema'; } @@ -41,26 +41,27 @@ elsif ($^O eq 'freebsd') $ENV{PATH} = "$ldap_bin_dir:$ENV{PATH}" if $ldap_bin_dir; -my $ldap_datadir = "${TestLib::tmp_check}/openldap-data"; -my $slapd_certs = "${TestLib::tmp_check}/slapd-certs"; -my $slapd_conf = "${TestLib::tmp_check}/slapd.conf"; +my $ldap_datadir = "${TestLib::tmp_check}/openldap-data"; +my $slapd_certs = "${TestLib::tmp_check}/slapd-certs"; +my $slapd_conf = "${TestLib::tmp_check}/slapd.conf"; my $slapd_pidfile = "${TestLib::tmp_check}/slapd.pid"; my $slapd_logfile = "${TestLib::tmp_check}/slapd.log"; -my $ldap_conf = "${TestLib::tmp_check}/ldap.conf"; -my $ldap_server = 'localhost'; -my $ldap_port = int(rand() * 16384) + 49152; -my $ldaps_port = $ldap_port + 1; -my $ldap_url = "ldap://$ldap_server:$ldap_port"; -my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; -my $ldap_basedn = 'dc=example,dc=net'; 
-my $ldap_rootdn = 'cn=Manager,dc=example,dc=net'; -my $ldap_rootpw = 'secret'; -my $ldap_pwfile = "${TestLib::tmp_check}/ldappassword"; +my $ldap_conf = "${TestLib::tmp_check}/ldap.conf"; +my $ldap_server = 'localhost'; +my $ldap_port = int(rand() * 16384) + 49152; +my $ldaps_port = $ldap_port + 1; +my $ldap_url = "ldap://$ldap_server:$ldap_port"; +my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; +my $ldap_basedn = 'dc=example,dc=net'; +my $ldap_rootdn = 'cn=Manager,dc=example,dc=net'; +my $ldap_rootpw = 'secret'; +my $ldap_pwfile = "${TestLib::tmp_check}/ldappassword"; note "setting up slapd"; -append_to_file($slapd_conf, -qq{include $ldap_schema_dir/core.schema +append_to_file( + $slapd_conf, + qq{include $ldap_schema_dir/core.schema include $ldap_schema_dir/cosine.schema include $ldap_schema_dir/nis.schema include $ldap_schema_dir/inetorgperson.schema @@ -84,16 +85,23 @@ rootdn "$ldap_rootdn" rootpw $ldap_rootpw}); # don't bother to check the server's cert (though perhaps we should) -append_to_file($ldap_conf, -qq{TLS_REQCERT never +append_to_file( + $ldap_conf, + qq{TLS_REQCERT never }); mkdir $ldap_datadir or die; -mkdir $slapd_certs or die; - -system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", "$slapd_certs/ca.key", "-x509", "-out", "$slapd_certs/ca.crt", "-subj", "/cn=CA"; -system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", "$slapd_certs/server.key", "-out", "$slapd_certs/server.csr", "-subj", "/cn=server"; -system_or_bail "openssl", "x509", "-req", "-in", "$slapd_certs/server.csr", "-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key", "-CAcreateserial", "-out", "$slapd_certs/server.crt"; +mkdir $slapd_certs or die; + +system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", + "$slapd_certs/ca.key", "-x509", "-out", "$slapd_certs/ca.crt", "-subj", + "/cn=CA"; +system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", + "$slapd_certs/server.key", "-out", "$slapd_certs/server.csr", "-subj", + "/cn=server"; +system_or_bail "openssl", "x509", "-req", "-in", "$slapd_certs/server.csr", + "-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key", + "-CAcreateserial", "-out", "$slapd_certs/server.crt"; system_or_bail $slapd, '-f', $slapd_conf, '-h', "$ldap_url $ldaps_url"; @@ -105,15 +113,17 @@ END append_to_file($ldap_pwfile, $ldap_rootpw); chmod 0600, $ldap_pwfile or die; -$ENV{'LDAPURI'} = $ldap_url; +$ENV{'LDAPURI'} = $ldap_url; $ENV{'LDAPBINDDN'} = $ldap_rootdn; -$ENV{'LDAPCONF'} = $ldap_conf; +$ENV{'LDAPCONF'} = $ldap_conf; note "loading LDAP data"; -system_or_bail 'ldapadd', '-x', '-y', $ldap_pwfile, '-f', 'authdata.ldif'; -system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret1', 'uid=test1,dc=example,dc=net'; -system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret2', 'uid=test2,dc=example,dc=net'; +system_or_bail 'ldapadd', '-x', '-y', $ldap_pwfile, '-f', 'authdata.ldif'; +system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret1', + 'uid=test1,dc=example,dc=net'; +system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret2', + 'uid=test2,dc=example,dc=net'; note "setting up PostgreSQL instance"; @@ -131,50 +141,66 @@ sub test_access { my ($node, $role, $expected_res, $test_name) = @_; - my $res = $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]); - is($res, $expected_res, $test_name); + my $res = + $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]); + is($res, $expected_res, $test_name); } note "simple bind"; unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="uid=" ldapsuffix=",dc=example,dc=net"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="uid=" ldapsuffix=",dc=example,dc=net"} +); $node->restart; $ENV{"PGPASSWORD"} = 'wrong'; -test_access($node, 'test0', 2, 'simple bind authentication fails if user not found in LDAP'); -test_access($node, 'test1', 2, 'simple bind authentication fails with wrong password'); +test_access($node, 'test0', 2, + 'simple bind authentication fails if user not found in LDAP'); +test_access($node, 'test1', 2, + 'simple bind authentication fails with wrong password'); $ENV{"PGPASSWORD"} = 'secret1'; test_access($node, 'test1', 0, 'simple bind authentication succeeds'); note "search+bind"; unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn"} +); $node->restart; $ENV{"PGPASSWORD"} = 'wrong'; -test_access($node, 'test0', 2, 'search+bind authentication fails if user not found in LDAP'); -test_access($node, 'test1', 2, 'search+bind authentication fails with wrong password'); +test_access($node, 'test0', 2, + 'search+bind authentication fails if user not found in LDAP'); +test_access($node, 'test1', 2, + 'search+bind authentication fails with wrong password'); $ENV{"PGPASSWORD"} = 'secret1'; test_access($node, 'test1', 0, 'search+bind authentication succeeds'); note "LDAP URLs"; unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"}); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"}); $node->restart; $ENV{"PGPASSWORD"} = 'wrong'; -test_access($node, 'test0', 2, 'search+bind with LDAP URL authentication fails if user not found in LDAP'); -test_access($node, 'test1', 2, 'search+bind with LDAP URL authentication fails with wrong password'); +test_access($node, 'test0', 2, + 'search+bind with LDAP URL authentication fails if user not found in LDAP' +); +test_access($node, 'test1', 2, + 'search+bind with LDAP URL authentication fails with wrong password'); $ENV{"PGPASSWORD"} = 'secret1'; -test_access($node, 'test1', 0, 'search+bind with LDAP URL authentication succeeds'); +test_access($node, 'test1', 0, + 'search+bind with LDAP URL authentication succeeds'); note "search filters"; unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -185,7 +211,9 @@ test_access($node, 'test2@example.net', 0, 'search filter finds by mail'); note "search filters in LDAP URLs"; unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub?(|(uid=\$username)(mail=\$username))"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub?(|(uid=\$username)(mail=\$username))"} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -197,7 +225,9 @@ test_access($node, 'test2@example.net', 0, 'search filter finds by mail'); # settings. ldapurl is always parsed first, then the other settings # override. It might be useful in a case like this. unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -207,7 +237,9 @@ note "diagnostic message"; # note bad ldapprefix with a question mark that triggers a diagnostic message unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="?uid=" ldapsuffix=""}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="?uid=" ldapsuffix=""} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -217,7 +249,9 @@ note "TLS"; # request StartTLS with ldaptls=1 unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)" ldaptls=1}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)" ldaptls=1} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -225,7 +259,9 @@ test_access($node, 'test1', 0, 'StartTLS'); # request LDAPS with ldapscheme=ldaps unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapscheme=ldaps ldapport=$ldaps_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapserver=$ldap_server ldapscheme=ldaps ldapport=$ldaps_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)"} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -233,7 +269,9 @@ test_access($node, 'test1', 0, 'LDAPS'); # request LDAPS with ldapurl=ldaps://... unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)"}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)"} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; @@ -241,7 +279,9 @@ test_access($node, 'test1', 0, 'LDAPS with URL'); # bad combination of LDAPS and StartTLS unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)" ldaptls=1}); +$node->append_conf('pg_hba.conf', +qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)" ldaptls=1} +); $node->restart; $ENV{"PGPASSWORD"} = 'secret1'; diff --git a/src/test/modules/brin/t/01_workitems.pl b/src/test/modules/brin/t/01_workitems.pl index 11c9981d40..c889a8313f 100644 --- a/src/test/modules/brin/t/01_workitems.pl +++ b/src/test/modules/brin/t/01_workitems.pl @@ -15,25 +15,27 @@ $node->start; $node->safe_psql('postgres', 'create extension pageinspect'); # Create a table with an autosummarizing BRIN index -$node->safe_psql('postgres', +$node->safe_psql( + 'postgres', 'create table brin_wi (a int) with (fillfactor = 10); create index brin_wi_idx on brin_wi using brin (a) with (pages_per_range=1, autosummarize=on); ' ); my $count = $node->safe_psql('postgres', - "select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)" +"select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)" ); is($count, '1', "initial index state is correct"); $node->safe_psql('postgres', 'insert into brin_wi select * from generate_series(1, 100)'); -$node->poll_query_until('postgres', - "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)", +$node->poll_query_until( + 'postgres', +"select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)", 't'); $count = $node->safe_psql('postgres', - "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)" +"select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)" ); is($count, 't', "index got summarized"); $node->stop; diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl index 2bc4787871..3d61dacce3 100644 --- a/src/test/modules/test_pg_dump/t/001_base.pl +++ b/src/test/modules/test_pg_dump/t/001_base.pl @@ -64,7 +64,7 @@ my %pgdump_runs = ( '--no-sync', "--file=$tempdir/createdb.sql", '-C', - '-R', # no-op, just for testing + '-R', # no-op, just for testing 'postgres', ], }, data_only => { dump_cmd => [ @@ -72,7 +72,7 @@ my %pgdump_runs = ( '--no-sync', "--file=$tempdir/data_only.sql", '-a', - '-v', # no-op, just make sure it works + '-v', # no-op, just make sure it works 'postgres', ], }, defaults => { dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ], @@ -178,14 +178,13 @@ my %pgdump_runs = ( # Tests which are considered 'full' dumps by pg_dump, but there # are flags used to exclude specific items (ACLs, blobs, etc). 
my %full_runs = ( - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, -); + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + createdb => 1, + defaults => 1, + no_privs => 1, + no_owner => 1,); my %tests = ( 'ALTER EXTENSION test_pg_dump' => { @@ -197,7 +196,7 @@ my %tests = ( \n\s+\Qcol1 integer NOT NULL,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE EXTENSION test_pg_dump' => { create_order => 2, @@ -207,10 +206,9 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, - unlike => { - binary_upgrade => 1, }, }, + unlike => { binary_upgrade => 1, }, }, 'CREATE ROLE regress_dump_test_role' => { create_order => 1, @@ -228,7 +226,7 @@ my %tests = ( \n\s+\QNO MAXVALUE\E \n\s+\QCACHE 1;\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE TABLE regress_pg_dump_table_added' => { create_order => 7, @@ -239,7 +237,7 @@ my %tests = ( \n\s+\Qcol1 integer NOT NULL,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE SEQUENCE regress_pg_dump_seq' => { regexp => qr/^ @@ -250,7 +248,7 @@ my %tests = ( \n\s+\QNO MAXVALUE\E \n\s+\QCACHE 1;\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'SETVAL SEQUENCE regress_seq_dumpable' => { create_order => 6, @@ -260,7 +258,7 @@ my %tests = ( \n/xm, like => { %full_runs, - data_only => 1, + data_only => 1, section_data => 1, }, }, 'CREATE TABLE regress_pg_dump_table' => { @@ -269,13 +267,13 @@ my %tests = ( \n\s+\Qcol1 integer NOT NULL,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE ACCESS METHOD regress_test_am' => { regexp => qr/^ \QCREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'COMMENT ON EXTENSION test_pg_dump' => { regexp => qr/^ @@ -294,7 +292,7 @@ my %tests = ( regexp => qr/^ \QGRANT SELECT ON TABLE public.regress_pg_dump_table_added TO regress_dump_test_role;\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'REVOKE SELECT regress_pg_dump_table_added post-ALTER EXTENSION' => { create_order => 10, @@ -307,8 +305,7 @@ my %tests = ( %full_runs, schema_only => 1, section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + unlike => { no_privs => 1, }, }, 'GRANT SELECT ON TABLE regress_pg_dump_table' => { regexp => qr/^ @@ -316,7 +313,7 @@ my %tests = ( \QGRANT SELECT ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'GRANT SELECT(col1) ON regress_pg_dump_table' => { regexp => qr/^ @@ -324,10 +321,11 @@ my %tests = ( \QGRANT SELECT(col1) ON TABLE public.regress_pg_dump_table TO PUBLIC;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, - 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' => - { create_order => 4, + 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' + => { + create_order => 4, create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table TO 
regress_dump_test_role;', regexp => qr/^ @@ -337,8 +335,7 @@ my %tests = ( %full_runs, schema_only => 1, section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + unlike => { no_privs => 1, }, }, 'GRANT USAGE ON regress_pg_dump_table_col1_seq TO regress_dump_test_role' => { @@ -352,14 +349,13 @@ my %tests = ( %full_runs, schema_only => 1, section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + unlike => { no_privs => 1, }, }, 'GRANT USAGE ON regress_pg_dump_seq TO regress_dump_test_role' => { regexp => qr/^ \QGRANT USAGE ON SEQUENCE public.regress_pg_dump_seq TO regress_dump_test_role;\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'REVOKE SELECT(col1) ON regress_pg_dump_table' => { create_order => 3, @@ -372,8 +368,7 @@ my %tests = ( %full_runs, schema_only => 1, section_pre_data => 1, }, - unlike => { - no_privs => 1, }, }, + unlike => { no_privs => 1, }, }, # Objects included in extension part of a schema created by this extension */ 'CREATE TABLE regress_pg_dump_schema.test_table' => { @@ -382,7 +377,7 @@ my %tests = ( \n\s+\Qcol1 integer,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'GRANT SELECT ON regress_pg_dump_schema.test_table' => { regexp => qr/^ @@ -390,7 +385,7 @@ my %tests = ( \QGRANT SELECT ON TABLE regress_pg_dump_schema.test_table TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE SEQUENCE regress_pg_dump_schema.test_seq' => { regexp => qr/^ @@ -401,7 +396,7 @@ my %tests = ( \n\s+\QNO MAXVALUE\E \n\s+\QCACHE 1;\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'GRANT USAGE ON regress_pg_dump_schema.test_seq' => { regexp => qr/^ @@ -409,14 +404,14 @@ my %tests = ( \QGRANT USAGE ON SEQUENCE regress_pg_dump_schema.test_seq TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE TYPE regress_pg_dump_schema.test_type' => { regexp => qr/^ \QCREATE TYPE regress_pg_dump_schema.test_type AS (\E \n\s+\Qcol1 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'GRANT USAGE ON regress_pg_dump_schema.test_type' => { regexp => qr/^ @@ -424,14 +419,14 @@ my %tests = ( \QGRANT ALL ON TYPE regress_pg_dump_schema.test_type TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE FUNCTION regress_pg_dump_schema.test_func' => { regexp => qr/^ \QCREATE FUNCTION regress_pg_dump_schema.test_func() RETURNS integer\E \n\s+\QLANGUAGE sql\E \n/xm, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'GRANT ALL ON regress_pg_dump_schema.test_func' => { regexp => qr/^ @@ -439,7 +434,7 @@ my %tests = ( \QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_func() TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, 'CREATE AGGREGATE regress_pg_dump_schema.test_agg' => { regexp => qr/^ @@ -447,7 +442,7 @@ my %tests = ( \n\s+\QSFUNC = int2_sum,\E \n\s+\QSTYPE = bigint\E \n\);\n/xm, - like => { binary_upgrade => 1, }, }, + like => { 
binary_upgrade => 1, }, }, 'GRANT ALL ON regress_pg_dump_schema.test_agg' => { regexp => qr/^ @@ -455,7 +450,7 @@ my %tests = ( \QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_agg(smallint) TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, }, + like => { binary_upgrade => 1, }, }, # Objects not included in extension, part of schema created by extension 'CREATE TABLE regress_pg_dump_schema.external_tab' => { @@ -468,9 +463,8 @@ my %tests = ( \n\);\n/xm, like => { %full_runs, - schema_only => 1, - section_pre_data => 1, }, }, -); + schema_only => 1, + section_pre_data => 1, }, },); ######################################### # Create a PG instance to test actually dumping from @@ -504,8 +498,9 @@ foreach my $run (sort keys %pgdump_runs) # Then count all the tests run against each run foreach my $test (sort keys %tests) { - # If there is a like entry, but no unlike entry, then we will test the like case - if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key})) +# If there is a like entry, but no unlike entry, then we will test the like case + if ($tests{$test}->{like}->{$test_key} + && !defined($tests{$test}->{unlike}->{$test_key})) { $num_tests++; } @@ -585,18 +580,18 @@ foreach my $run (sort keys %pgdump_runs) { # Run the test listed as a like, unless it is specifically noted # as an unlike (generally due to an explicit exclusion or similar). - if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key})) + if ($tests{$test}->{like}->{$test_key} + && !defined($tests{$test}->{unlike}->{$test_key})) { - if (!ok($output_file =~ $tests{$test}->{regexp}, "$run: should dump $test")) + if (!ok($output_file =~ $tests{$test}->{regexp}, + "$run: should dump $test")) { diag("Review $run results in $tempdir"); } } else { - if (!ok( - $output_file !~ - $tests{$test}->{regexp}, + if (!ok($output_file !~ $tests{$test}->{regexp}, "$run: should not dump $test")) { diag("Review $run results in $tempdir"); diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm index 5a8f084efe..aa81e0c20c 100644 --- a/src/test/perl/PostgresNode.pm +++ b/src/test/perl/PostgresNode.pm @@ -284,7 +284,7 @@ sub group_access my $dir_stat = stat($self->data_dir); defined($dir_stat) - or die('unable to stat ' . $self->data_dir); + or die('unable to stat ' . $self->data_dir); return (S_IMODE($dir_stat->mode) == 0750); } @@ -482,8 +482,8 @@ sub init } close $conf; - chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf") - or die("unable to set permissions for $pgdata/postgresql.conf"); + chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf") + or die("unable to set permissions for $pgdata/postgresql.conf"); $self->set_replication_conf if $params{allows_streaming}; $self->enable_archiving if $params{has_archiving}; @@ -510,8 +510,8 @@ sub append_conf TestLib::append_to_file($conffile, $str . "\n"); - chmod($self->group_access() ? 0640 : 0600, $conffile) - or die("unable to set permissions for $conffile"); + chmod($self->group_access() ? 0640 : 0600, $conffile) + or die("unable to set permissions for $conffile"); } =pod @@ -1535,7 +1535,7 @@ sub wait_for_catchup } else { - $lsn_expr = 'pg_current_wal_lsn()' + $lsn_expr = 'pg_current_wal_lsn()'; } print "Waiting for replication conn " . $standby_name . "'s " @@ -1686,8 +1686,8 @@ to check for timeout. retval is undef on timeout. 
sub pg_recvlogical_upto { - my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) = - @_; + my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) + = @_; my ($stdout, $stderr); my $timeout_exception = 'pg_recvlogical timed out'; diff --git a/src/test/perl/RecursiveCopy.pm b/src/test/perl/RecursiveCopy.pm index 5bce720b35..baf5d0ac63 100644 --- a/src/test/perl/RecursiveCopy.pm +++ b/src/test/perl/RecursiveCopy.pm @@ -71,7 +71,7 @@ sub copypath { croak "if specified, filterfn must be a subroutine reference" unless defined(ref $params{filterfn}) - and (ref $params{filterfn} eq 'CODE'); + and (ref $params{filterfn} eq 'CODE'); $filterfn = $params{filterfn}; } diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index 8047404efd..355ef5fc82 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -169,16 +169,17 @@ sub tempdir_short # not under msys, return the input argument unchanged. sub real_dir { - my $dir = "$_[0]"; - return $dir unless -d $dir; - return $dir unless $Config{osname} eq 'msys'; - my $here = cwd; - chdir $dir; + my $dir = "$_[0]"; + return $dir unless -d $dir; + return $dir unless $Config{osname} eq 'msys'; + my $here = cwd; + chdir $dir; + # this odd way of calling 'pwd -W' is the only way that seems to work. - $dir = qx{sh -c "pwd -W"}; - chomp $dir; - chdir $here; - return $dir; + $dir = qx{sh -c "pwd -W"}; + chomp $dir; + chdir $here; + return $dir; } sub system_log @@ -254,12 +255,9 @@ sub check_mode_recursive # Result defaults to true my $result = 1; - find - ( - {follow_fast => 1, - wanted => - sub - { + find( + { follow_fast => 1, + wanted => sub { my $file_stat = stat($File::Find::name); # Is file in the ignore list? @@ -272,7 +270,7 @@ sub check_mode_recursive } defined($file_stat) - or die("unable to stat $File::Find::name"); + or die("unable to stat $File::Find::name"); my $file_mode = S_IMODE($file_stat->mode); @@ -281,35 +279,39 @@ sub check_mode_recursive { if ($file_mode != $expected_file_mode) { - print(*STDERR, + print( + *STDERR, sprintf("$File::Find::name mode must be %04o\n", - $expected_file_mode)); + $expected_file_mode)); $result = 0; return; } } + # Else a directory? elsif (S_ISDIR($file_stat->mode)) { if ($file_mode != $expected_dir_mode) { - print(*STDERR, + print( + *STDERR, sprintf("$File::Find::name mode must be %04o\n", - $expected_dir_mode)); + $expected_dir_mode)); $result = 0; return; } } + # Else something we can't handle else { die "unknown file type for $File::Find::name"; } - }}, - $dir - ); + } + }, + $dir); return $result; } @@ -319,23 +321,21 @@ sub chmod_recursive { my ($dir, $dir_mode, $file_mode) = @_; - find - ( - {follow_fast => 1, - wanted => - sub - { + find( + { follow_fast => 1, + wanted => sub { my $file_stat = stat($File::Find::name); if (defined($file_stat)) { - chmod(S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode, - $File::Find::name) - or die "unable to chmod $File::Find::name"; + chmod( + S_ISDIR($file_stat->mode) ? 
$dir_mode : $file_mode, + $File::Find::name + ) or die "unable to chmod $File::Find::name"; } - }}, - $dir - ); + } + }, + $dir); } # Check presence of a given regexp within pg_config.h for the installation @@ -351,7 +351,7 @@ sub check_pg_config chomp($stdout); open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!"; - my $match = (grep {/^$regexp/} <$pg_config_h>); + my $match = (grep { /^$regexp/ } <$pg_config_h>); close $pg_config_h; return $match; } diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index 8b35bc8438..7e1759fbaf 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -80,8 +80,7 @@ is($stdout_recv, $expected, $node_master->poll_query_until('postgres', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'test_slot' AND active_pid IS NULL)" -) - or die "slot never became inactive"; +) or die "slot never became inactive"; $stdout_recv = $node_master->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, 10, diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl index 95f22bc421..93c22d181c 100644 --- a/src/test/recovery/t/009_twophase.pl +++ b/src/test/recovery/t/009_twophase.pl @@ -333,9 +333,9 @@ $cur_master->psql( # Ensure that last transaction is replayed on standby. my $cur_master_lsn = - $cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); + $cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); my $caughtup_query = - "SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()"; + "SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()"; $cur_standby->poll_query_until('postgres', $caughtup_query) or die "Timed out while waiting for standby to catch up"; diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index 91a8ef90c1..783c936393 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -30,8 +30,9 @@ $node->init(allows_streaming => 1); $node->start(); # by default PostgresNode doesn't doesn't restart after a crash -$node->safe_psql('postgres', - q[ALTER SYSTEM SET restart_after_crash = 1; +$node->safe_psql( + 'postgres', + q[ALTER SYSTEM SET restart_after_crash = 1; ALTER SYSTEM SET log_connections = 1; SELECT pg_reload_conf();]); @@ -68,7 +69,7 @@ INSERT INTO alive VALUES($$committed-before-sigquit$$); SELECT pg_backend_pid(); ]; ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), - 'acquired pid for SIGQUIT'); + 'acquired pid for SIGQUIT'); my $pid = $killme_stdout; chomp($pid); $killme_stdout = ''; @@ -80,7 +81,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status; ]; ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigquit/m), - 'inserted in-progress-before-sigquit'); + 'inserted in-progress-before-sigquit'); $killme_stdout = ''; $killme_stderr = ''; @@ -93,7 +94,7 @@ SELECT $$psql-connected$$; SELECT pg_sleep(3600); ]; ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m), - 'monitor connected'); + 'monitor connected'); $monitor_stdout = ''; $monitor_stderr = ''; @@ -107,8 +108,12 @@ is($ret, 0, "killed process with SIGQUIT"); $killme_stdin .= q[ SELECT 1; ]; -ok(pump_until($killme, \$killme_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m), - "psql query died successfully after SIGQUIT"); +ok( pump_until( + $killme, + 
\$killme_stderr, +qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m + ), + "psql query died successfully after SIGQUIT"); $killme_stderr = ''; $killme_stdout = ''; $killme->finish; @@ -116,13 +121,21 @@ $killme->finish; # Wait till server restarts - we should get the WARNING here, but # sometimes the server is unable to send that, if interrupted while # sending. -ok(pump_until($monitor, \$monitor_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m), - "psql monitor died successfully after SIGQUIT"); +ok( pump_until( + $monitor, + \$monitor_stderr, +qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m + ), + "psql monitor died successfully after SIGQUIT"); $monitor->finish; # Wait till server restarts -is($node->poll_query_until('postgres', 'SELECT $$restarted after sigquit$$;', 'restarted after sigquit'), - "1", "reconnected after SIGQUIT"); +is( $node->poll_query_until( + 'postgres', + 'SELECT $$restarted after sigquit$$;', + 'restarted after sigquit'), + "1", + "reconnected after SIGQUIT"); # restart psql processes, now that the crash cycle finished @@ -137,10 +150,10 @@ $killme_stdin .= q[ SELECT pg_backend_pid(); ]; ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), - "acquired pid for SIGKILL"); + "acquired pid for SIGKILL"); $pid = $killme_stdout; chomp($pid); -$pid = $killme_stdout; +$pid = $killme_stdout; $killme_stdout = ''; $killme_stderr = ''; @@ -151,7 +164,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status; ]; ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigkill/m), - 'inserted in-progress-before-sigkill'); + 'inserted in-progress-before-sigkill'); $killme_stdout = ''; $killme_stderr = ''; @@ -164,7 +177,7 @@ SELECT $$psql-connected$$; SELECT pg_sleep(3600); ]; ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m), - 'monitor connected'); + 'monitor connected'); $monitor_stdout = ''; $monitor_stderr = ''; @@ -179,35 +192,51 @@ is($ret, 0, "killed process with KILL"); $killme_stdin .= q[ SELECT 1; ]; -ok(pump_until($killme, \$killme_stderr, qr/server closed the connection unexpectedly/m), - "psql query died successfully after SIGKILL"); +ok( pump_until( + $killme, \$killme_stderr, + qr/server closed the connection unexpectedly/m), + "psql query died successfully after SIGKILL"); $killme->finish; # Wait till server restarts - we should get the WARNING here, but # sometimes the server is unable to send that, if interrupted while # sending. 
-ok(pump_until($monitor, \$monitor_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m), - "psql monitor died successfully after SIGKILL"); +ok( pump_until( + $monitor, + \$monitor_stderr, +qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m + ), + "psql monitor died successfully after SIGKILL"); $monitor->finish; # Wait till server restarts -is($node->poll_query_until('postgres', 'SELECT 1', '1'), "1", "reconnected after SIGKILL"); +is($node->poll_query_until('postgres', 'SELECT 1', '1'), + "1", "reconnected after SIGKILL"); # Make sure the committed rows survived, in-progress ones not -is($node->safe_psql('postgres', 'SELECT * FROM alive'), - "committed-before-sigquit\ncommitted-before-sigkill", 'data survived'); +is( $node->safe_psql('postgres', 'SELECT * FROM alive'), + "committed-before-sigquit\ncommitted-before-sigkill", + 'data survived'); -is($node->safe_psql('postgres', 'INSERT INTO alive VALUES($$before-orderly-restart$$) RETURNING status'), - 'before-orderly-restart', 'can still write after crash restart'); +is( $node->safe_psql( + 'postgres', +'INSERT INTO alive VALUES($$before-orderly-restart$$) RETURNING status'), + 'before-orderly-restart', + 'can still write after crash restart'); # Just to be sure, check that an orderly restart now still works $node->restart(); -is($node->safe_psql('postgres', 'SELECT * FROM alive'), - "committed-before-sigquit\ncommitted-before-sigkill\nbefore-orderly-restart", 'data survived'); +is( $node->safe_psql('postgres', 'SELECT * FROM alive'), +"committed-before-sigquit\ncommitted-before-sigkill\nbefore-orderly-restart", + 'data survived'); -is($node->safe_psql('postgres', 'INSERT INTO alive VALUES($$after-orderly-restart$$) RETURNING status'), - 'after-orderly-restart', 'can still write after orderly restart'); +is( $node->safe_psql( + 'postgres', + 'INSERT INTO alive VALUES($$after-orderly-restart$$) RETURNING status' + ), + 'after-orderly-restart', + 'can still write after orderly restart'); $node->stop(); @@ -221,7 +250,7 @@ sub pump_until if ($psql_timeout->is_expired) { diag("aborting wait: program timed out"); - diag("stream contents: >>", $$stream,"<<"); + diag("stream contents: >>", $$stream, "<<"); diag("pattern searched for: ", $untl); return 0; @@ -229,7 +258,7 @@ sub pump_until if (not $proc->pumpable()) { diag("aborting wait: program died"); - diag("stream contents: >>", $$stream,"<<"); + diag("stream contents: >>", $$stream, "<<"); diag("pattern searched for: ", $untl); return 0; @@ -239,4 +268,4 @@ sub pump_until } return 1; -}; +} diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl index 446144a783..103c0a2b91 100644 --- a/src/test/recovery/t/014_unlogged_reinit.pl +++ b/src/test/recovery/t/014_unlogged_reinit.pl @@ -32,8 +32,7 @@ my $tablespaceDir = TestLib::tempdir; my $realTSDir = TestLib::real_dir($tablespaceDir); -$node->safe_psql('postgres', - "CREATE TABLESPACE ts1 LOCATION '$realTSDir'"); +$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$realTSDir'"); $node->safe_psql('postgres', 'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1'); @@ -64,11 +63,9 @@ unlink("$pgdata/${ts1UnloggedPath}") $node->start; # check unlogged table in base -ok(-f "$pgdata/${baseUnloggedPath}_init", - 'init fork in base still exists'); -ok(-f "$pgdata/$baseUnloggedPath", - 'main fork in base recreated at startup'); -ok( !-f 
"$pgdata/${baseUnloggedPath}_vm", +ok(-f "$pgdata/${baseUnloggedPath}_init", 'init fork in base still exists'); +ok(-f "$pgdata/$baseUnloggedPath", 'main fork in base recreated at startup'); +ok(!-f "$pgdata/${baseUnloggedPath}_vm", 'vm fork in base removed at startup'); ok( !-f "$pgdata/${baseUnloggedPath}_fsm", 'fsm fork in base removed at startup'); diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm index e81f4df7c5..5a7ba953e2 100644 --- a/src/test/ssl/ServerSetup.pm +++ b/src/test/ssl/ServerSetup.pm @@ -41,7 +41,8 @@ sub test_connect_ok my ($common_connstr, $connstr, $test_name) = @_; my $cmd = [ - 'psql', '-X', '-A', '-t', '-c', "SELECT \$\$connected with $connstr\$\$", + 'psql', '-X', '-A', '-t', '-c', + "SELECT \$\$connected with $connstr\$\$", '-d', "$common_connstr $connstr" ]; command_ok($cmd, $test_name); @@ -52,7 +53,8 @@ sub test_connect_fails my ($common_connstr, $connstr, $expected_stderr, $test_name) = @_; my $cmd = [ - 'psql', '-X', '-A', '-t', '-c', "SELECT \$\$connected with $connstr\$\$", + 'psql', '-X', '-A', '-t', '-c', + "SELECT \$\$connected with $connstr\$\$", '-d', "$common_connstr $connstr" ]; command_fails_like($cmd, $expected_stderr, $test_name); @@ -89,9 +91,11 @@ sub configure_test_server_for_ssl if (defined($password)) { $node->psql('postgres', -"SET password_encryption='$password_enc'; ALTER USER ssltestuser PASSWORD '$password';"); +"SET password_encryption='$password_enc'; ALTER USER ssltestuser PASSWORD '$password';" + ); $node->psql('postgres', -"SET password_encryption='$password_enc'; ALTER USER anotheruser PASSWORD '$password';"); +"SET password_encryption='$password_enc'; ALTER USER anotheruser PASSWORD '$password';" + ); } # enable logging etc. @@ -149,7 +153,7 @@ sub switch_server_cert sub configure_hba_for_ssl { my ($node, $serverhost, $authmethod) = @_; - my $pgdata = $node->data_dir; + my $pgdata = $node->data_dir; # Only accept SSL connections from localhost. Our tests don't depend on this # but seems best to keep it as narrow as possible for security reasons. diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index 91feac613e..956de962ca 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -53,26 +53,28 @@ configure_test_server_for_ssl($node, $SERVERHOSTADDR, 'trust'); note "testing password-protected keys"; -open my $sslconf, '>', $node->data_dir."/sslconfig.conf"; +open my $sslconf, '>', $node->data_dir . "/sslconfig.conf"; print $sslconf "ssl=on\n"; print $sslconf "ssl_cert_file='server-cn-only.crt'\n"; print $sslconf "ssl_key_file='server-password.key'\n"; print $sslconf "ssl_passphrase_command='echo wrongpassword'\n"; close $sslconf; -command_fails(['pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart'], - 'restart fails with password-protected key file with wrong password'); +command_fails( + [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], + 'restart fails with password-protected key file with wrong password'); $node->_update_pid(0); -open $sslconf, '>', $node->data_dir."/sslconfig.conf"; +open $sslconf, '>', $node->data_dir . 
"/sslconfig.conf"; print $sslconf "ssl=on\n"; print $sslconf "ssl_cert_file='server-cn-only.crt'\n"; print $sslconf "ssl_key_file='server-password.key'\n"; print $sslconf "ssl_passphrase_command='echo secret1'\n"; close $sslconf; -command_ok(['pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart'], - 'restart succeeds with password-protected key file'); +command_ok( + [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], + 'restart succeeds with password-protected key file'); $node->_update_pid(1); ### Run client-side tests. @@ -89,93 +91,110 @@ $common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test"; # The server should not accept non-SSL connections. -test_connect_fails($common_connstr, "sslmode=disable", - qr/\Qno pg_hba.conf entry\E/, - "server doesn't accept non-SSL connections"); +test_connect_fails( + $common_connstr, "sslmode=disable", + qr/\Qno pg_hba.conf entry\E/, + "server doesn't accept non-SSL connections"); # Try without a root cert. In sslmode=require, this should work. In verify-ca # or verify-full mode it should fail. -test_connect_ok($common_connstr, "sslrootcert=invalid sslmode=require", - "connect without server root cert sslmode=require"); -test_connect_fails($common_connstr, "sslrootcert=invalid sslmode=verify-ca", - qr/root certificate file "invalid" does not exist/, - "connect without server root cert sslmode=verify-ca"); -test_connect_fails($common_connstr, "sslrootcert=invalid sslmode=verify-full", - qr/root certificate file "invalid" does not exist/, - "connect without server root cert sslmode=verify-full"); +test_connect_ok( + $common_connstr, + "sslrootcert=invalid sslmode=require", + "connect without server root cert sslmode=require"); +test_connect_fails( + $common_connstr, + "sslrootcert=invalid sslmode=verify-ca", + qr/root certificate file "invalid" does not exist/, + "connect without server root cert sslmode=verify-ca"); +test_connect_fails( + $common_connstr, + "sslrootcert=invalid sslmode=verify-full", + qr/root certificate file "invalid" does not exist/, + "connect without server root cert sslmode=verify-full"); # Try with wrong root cert, should fail. (We're using the client CA as the # root, but the server's key is signed by the server CA.) test_connect_fails($common_connstr, - "sslrootcert=ssl/client_ca.crt sslmode=require", - qr/SSL error/, - "connect with wrong server root cert sslmode=require"); + "sslrootcert=ssl/client_ca.crt sslmode=require", + qr/SSL error/, "connect with wrong server root cert sslmode=require"); test_connect_fails($common_connstr, - "sslrootcert=ssl/client_ca.crt sslmode=verify-ca", - qr/SSL error/, - "connect with wrong server root cert sslmode=verify-ca"); + "sslrootcert=ssl/client_ca.crt sslmode=verify-ca", + qr/SSL error/, "connect with wrong server root cert sslmode=verify-ca"); test_connect_fails($common_connstr, - "sslrootcert=ssl/client_ca.crt sslmode=verify-full", - qr/SSL error/, - "connect with wrong server root cert sslmode=verify-full"); + "sslrootcert=ssl/client_ca.crt sslmode=verify-full", + qr/SSL error/, "connect with wrong server root cert sslmode=verify-full"); # Try with just the server CA's cert. This fails because the root file # must contain the whole chain up to the root CA. 
test_connect_fails($common_connstr, - "sslrootcert=ssl/server_ca.crt sslmode=verify-ca", - qr/SSL error/, - "connect with server CA cert, without root CA"); + "sslrootcert=ssl/server_ca.crt sslmode=verify-ca", + qr/SSL error/, "connect with server CA cert, without root CA"); # And finally, with the correct root cert. -test_connect_ok($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=require", - "connect with correct server CA cert file sslmode=require"); -test_connect_ok($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca", - "connect with correct server CA cert file sslmode=verify-ca"); -test_connect_ok($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-full", - "connect with correct server CA cert file sslmode=verify-full"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=require", + "connect with correct server CA cert file sslmode=require"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca", + "connect with correct server CA cert file sslmode=verify-ca"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-full", + "connect with correct server CA cert file sslmode=verify-full"); # Test with cert root file that contains two certificates. The client should # be able to pick the right one, regardless of the order in the file. -test_connect_ok($common_connstr, - "sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca", - "cert root file that contains two certificates, order 1"); -test_connect_ok($common_connstr, - "sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca", - "cert root file that contains two certificates, order 2"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca", + "cert root file that contains two certificates, order 1"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca", + "cert root file that contains two certificates, order 2"); # CRL tests # Invalid CRL filename is the same as no CRL, succeeds -test_connect_ok($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid", - "sslcrl option with invalid file name"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid", + "sslcrl option with invalid file name"); # A CRL belonging to a different CA is not accepted, fails -test_connect_fails($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl", - qr/SSL error/, - "CRL belonging to a different CA"); +test_connect_fails( + $common_connstr, +"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl", + qr/SSL error/, + "CRL belonging to a different CA"); # With the correct CRL, succeeds (this cert is not revoked) -test_connect_ok($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl", - "CRL with a non-revoked cert"); +test_connect_ok( + $common_connstr, +"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl", + "CRL with a non-revoked cert"); # Check that connecting with verify-full fails, when the hostname doesn't # match the hostname in the server's certificate. 
$common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; -test_connect_ok($common_connstr, "sslmode=require host=wronghost.test", - "mismatch between host name and server certificate sslmode=require"); -test_connect_ok($common_connstr, "sslmode=verify-ca host=wronghost.test", - "mismatch between host name and server certificate sslmode=verify-ca"); -test_connect_fails($common_connstr, "sslmode=verify-full host=wronghost.test", - qr/\Qserver certificate for "common-name.pg-ssltest.test" does not match host name "wronghost.test"\E/, - "mismatch between host name and server certificate sslmode=verify-full"); +test_connect_ok( + $common_connstr, + "sslmode=require host=wronghost.test", + "mismatch between host name and server certificate sslmode=require"); +test_connect_ok( + $common_connstr, + "sslmode=verify-ca host=wronghost.test", + "mismatch between host name and server certificate sslmode=verify-ca"); +test_connect_fails( + $common_connstr, + "sslmode=verify-full host=wronghost.test", +qr/\Qserver certificate for "common-name.pg-ssltest.test" does not match host name "wronghost.test"\E/, + "mismatch between host name and server certificate sslmode=verify-full"); # Test Subject Alternative Names. switch_server_cert($node, 'server-multiple-alt-names'); @@ -183,20 +202,29 @@ switch_server_cert($node, 'server-multiple-alt-names'); $common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; -test_connect_ok($common_connstr, "host=dns1.alt-name.pg-ssltest.test", - "host name matching with X.509 Subject Alternative Names 1"); -test_connect_ok($common_connstr, "host=dns2.alt-name.pg-ssltest.test", - "host name matching with X.509 Subject Alternative Names 2"); -test_connect_ok($common_connstr, "host=foo.wildcard.pg-ssltest.test", - "host name matching with X.509 Subject Alternative Names wildcard"); - -test_connect_fails($common_connstr, "host=wronghost.alt-name.pg-ssltest.test", - qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "wronghost.alt-name.pg-ssltest.test"\E/, - "host name not matching with X.509 Subject Alternative Names"); -test_connect_fails($common_connstr, - "host=deep.subdomain.wildcard.pg-ssltest.test", - qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/, - "host name not matching with X.509 Subject Alternative Names wildcard"); +test_connect_ok( + $common_connstr, + "host=dns1.alt-name.pg-ssltest.test", + "host name matching with X.509 Subject Alternative Names 1"); +test_connect_ok( + $common_connstr, + "host=dns2.alt-name.pg-ssltest.test", + "host name matching with X.509 Subject Alternative Names 2"); +test_connect_ok( + $common_connstr, + "host=foo.wildcard.pg-ssltest.test", + "host name matching with X.509 Subject Alternative Names wildcard"); + +test_connect_fails( + $common_connstr, + "host=wronghost.alt-name.pg-ssltest.test", +qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "wronghost.alt-name.pg-ssltest.test"\E/, + "host name not matching with X.509 Subject Alternative Names"); +test_connect_fails( + $common_connstr, + "host=deep.subdomain.wildcard.pg-ssltest.test", +qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name 
"deep.subdomain.wildcard.pg-ssltest.test"\E/, + "host name not matching with X.509 Subject Alternative Names wildcard"); # Test certificate with a single Subject Alternative Name. (this gives a # slightly different error message, that's all) @@ -205,16 +233,22 @@ switch_server_cert($node, 'server-single-alt-name'); $common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; -test_connect_ok($common_connstr, "host=single.alt-name.pg-ssltest.test", - "host name matching with a single X.509 Subject Alternative Name"); - -test_connect_fails($common_connstr, "host=wronghost.alt-name.pg-ssltest.test", - qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "wronghost.alt-name.pg-ssltest.test"\E/, - "host name not matching with a single X.509 Subject Alternative Name"); -test_connect_fails($common_connstr, - "host=deep.subdomain.wildcard.pg-ssltest.test", - qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/, - "host name not matching with a single X.509 Subject Alternative Name wildcard"); +test_connect_ok( + $common_connstr, + "host=single.alt-name.pg-ssltest.test", + "host name matching with a single X.509 Subject Alternative Name"); + +test_connect_fails( + $common_connstr, + "host=wronghost.alt-name.pg-ssltest.test", +qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "wronghost.alt-name.pg-ssltest.test"\E/, + "host name not matching with a single X.509 Subject Alternative Name"); +test_connect_fails( + $common_connstr, + "host=deep.subdomain.wildcard.pg-ssltest.test", +qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/, +"host name not matching with a single X.509 Subject Alternative Name wildcard" +); # Test server certificate with a CN and SANs. Per RFCs 2818 and 6125, the CN # should be ignored when the certificate has both. @@ -223,13 +257,19 @@ switch_server_cert($node, 'server-cn-and-alt-names'); $common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; -test_connect_ok($common_connstr, "host=dns1.alt-name.pg-ssltest.test", - "certificate with both a CN and SANs 1"); -test_connect_ok($common_connstr, "host=dns2.alt-name.pg-ssltest.test", - "certificate with both a CN and SANs 2"); -test_connect_fails($common_connstr, "host=common-name.pg-ssltest.test", - qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 1 other name) does not match host name "common-name.pg-ssltest.test"\E/, - "certificate with both a CN and SANs ignores CN"); +test_connect_ok( + $common_connstr, + "host=dns1.alt-name.pg-ssltest.test", + "certificate with both a CN and SANs 1"); +test_connect_ok( + $common_connstr, + "host=dns2.alt-name.pg-ssltest.test", + "certificate with both a CN and SANs 2"); +test_connect_fails( + $common_connstr, + "host=common-name.pg-ssltest.test", +qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 1 other name) does not match host name "common-name.pg-ssltest.test"\E/, + "certificate with both a CN and SANs ignores CN"); # Finally, test a server certificate that has no CN or SANs. Of course, that's # not a very sensible certificate, but libpq should handle it gracefully. 
@@ -237,13 +277,15 @@ switch_server_cert($node, 'server-no-names'); $common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; -test_connect_ok($common_connstr, - "sslmode=verify-ca host=common-name.pg-ssltest.test", - "server certificate without CN or SANs sslmode=verify-ca"); -test_connect_fails($common_connstr, - "sslmode=verify-full host=common-name.pg-ssltest.test", - qr/could not get server's host name from server certificate/, - "server certificate without CN or SANs sslmode=verify-full"); +test_connect_ok( + $common_connstr, + "sslmode=verify-ca host=common-name.pg-ssltest.test", + "server certificate without CN or SANs sslmode=verify-ca"); +test_connect_fails( + $common_connstr, + "sslmode=verify-full host=common-name.pg-ssltest.test", + qr/could not get server's host name from server certificate/, + "server certificate without CN or SANs sslmode=verify-full"); # Test that the CRL works switch_server_cert($node, 'server-revoked'); @@ -252,13 +294,15 @@ $common_connstr = "user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test"; # Without the CRL, succeeds. With it, fails. -test_connect_ok($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca", - "connects without client-side CRL"); -test_connect_fails($common_connstr, - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl", - qr/SSL error/, - "does not connect with client-side CRL"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca", + "connects without client-side CRL"); +test_connect_fails( + $common_connstr, +"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl", + qr/SSL error/, + "does not connect with client-side CRL"); ### Server-side tests. 
### @@ -270,47 +314,51 @@ $common_connstr = "sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR"; # no client cert -test_connect_fails($common_connstr, - "user=ssltestuser sslcert=invalid", - qr/connection requires a valid client certificate/, - "certificate authorization fails without client cert"); +test_connect_fails( + $common_connstr, + "user=ssltestuser sslcert=invalid", + qr/connection requires a valid client certificate/, + "certificate authorization fails without client cert"); # correct client cert -test_connect_ok($common_connstr, - "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", - "certificate authorization succeeds with correct client cert"); +test_connect_ok( + $common_connstr, + "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", + "certificate authorization succeeds with correct client cert"); # client key with wrong permissions -test_connect_fails($common_connstr, - "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_wrongperms_tmp.key", - qr!\Qprivate key file "ssl/client_wrongperms_tmp.key" has group or world access\E!, - "certificate authorization fails because of file permissions"); +test_connect_fails( + $common_connstr, +"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_wrongperms_tmp.key", +qr!\Qprivate key file "ssl/client_wrongperms_tmp.key" has group or world access\E!, + "certificate authorization fails because of file permissions"); # client cert belonging to another user -test_connect_fails($common_connstr, - "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", - qr/certificate authentication failed for user "anotheruser"/, - "certificate authorization fails with client cert belonging to another user"); +test_connect_fails( + $common_connstr, + "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", + qr/certificate authentication failed for user "anotheruser"/, +"certificate authorization fails with client cert belonging to another user"); # revoked client cert -test_connect_fails($common_connstr, - "user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked_tmp.key", - qr/SSL error/, - "certificate authorization fails with revoked client cert"); +test_connect_fails( + $common_connstr, +"user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked_tmp.key", + qr/SSL error/, + "certificate authorization fails with revoked client cert"); # intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file switch_server_cert($node, 'server-cn-only', 'root_ca'); $common_connstr = "user=ssltestuser dbname=certdb sslkey=ssl/client_tmp.key sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; -test_connect_ok($common_connstr, - "sslmode=require sslcert=ssl/client+client_ca.crt", - "intermediate client certificate is provided by client"); +test_connect_ok( + $common_connstr, + "sslmode=require sslcert=ssl/client+client_ca.crt", + "intermediate client certificate is provided by client"); test_connect_fails($common_connstr, "sslmode=require sslcert=ssl/client.crt", - qr/SSL error/, - "intermediate client certificate is missing"); + qr/SSL error/, "intermediate client certificate is missing"); # clean up -unlink("ssl/client_tmp.key", - "ssl/client_wrongperms_tmp.key", - "ssl/client-revoked_tmp.key"); +unlink("ssl/client_tmp.key", "ssl/client_wrongperms_tmp.key", + "ssl/client-revoked_tmp.key"); diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl index a805a3196b..fa3f856646 
100644 --- a/src/test/ssl/t/002_scram.pl +++ b/src/test/ssl/t/002_scram.pl @@ -20,7 +20,7 @@ my $SERVERHOSTADDR = '127.0.0.1'; # Determine whether build supports tls-server-end-point. my $supports_tls_server_end_point = - check_pg_config("#define HAVE_X509_GET_SIGNATURE_NID 1"); + check_pg_config("#define HAVE_X509_GET_SIGNATURE_NID 1"); # Allocation of base connection string shared among multiple tests. my $common_connstr; @@ -39,38 +39,41 @@ $node->start; # Configure server for SSL connections, with password handling. configure_test_server_for_ssl($node, $SERVERHOSTADDR, "scram-sha-256", - "pass", "scram-sha-256"); + "pass", "scram-sha-256"); switch_server_cert($node, 'server-cn-only'); $ENV{PGPASSWORD} = "pass"; $common_connstr = -"user=ssltestuser dbname=trustdb sslmode=require hostaddr=$SERVERHOSTADDR"; + "user=ssltestuser dbname=trustdb sslmode=require hostaddr=$SERVERHOSTADDR"; # Default settings test_connect_ok($common_connstr, '', - "SCRAM authentication with default channel binding"); + "SCRAM authentication with default channel binding"); # Channel binding settings -test_connect_ok($common_connstr, +test_connect_ok( + $common_connstr, "scram_channel_binding=tls-unique", "SCRAM authentication with tls-unique as channel binding"); -test_connect_ok($common_connstr, - "scram_channel_binding=''", - "SCRAM authentication without channel binding"); +test_connect_ok($common_connstr, "scram_channel_binding=''", + "SCRAM authentication without channel binding"); if ($supports_tls_server_end_point) { - test_connect_ok($common_connstr, - "scram_channel_binding=tls-server-end-point", - "SCRAM authentication with tls-server-end-point as channel binding"); + test_connect_ok( + $common_connstr, + "scram_channel_binding=tls-server-end-point", + "SCRAM authentication with tls-server-end-point as channel binding"); } else { - test_connect_fails($common_connstr, - "scram_channel_binding=tls-server-end-point", - qr/channel binding type "tls-server-end-point" is not supported by this build/, - "SCRAM authentication with tls-server-end-point as channel binding"); + test_connect_fails( + $common_connstr, + "scram_channel_binding=tls-server-end-point", +qr/channel binding type "tls-server-end-point" is not supported by this build/, + "SCRAM authentication with tls-server-end-point as channel binding"); $number_of_tests++; } -test_connect_fails($common_connstr, +test_connect_fails( + $common_connstr, "scram_channel_binding=not-exists", qr/unsupported SCRAM channel-binding type/, "SCRAM authentication with invalid channel binding"); diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl index 4050e82bc9..e16b710ef1 100644 --- a/src/test/subscription/t/001_rep_changes.pl +++ b/src/test/subscription/t/001_rep_changes.pl @@ -32,7 +32,8 @@ $node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "INSERT INTO tab_mixed (a, b) VALUES (1, 'foo')"); $node_publisher->safe_psql('postgres', - "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))"); +"CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))" +); # Setup structure on subscriber $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)"); @@ -48,7 +49,8 @@ $node_subscriber->safe_psql('postgres', # replication of the table with included index $node_subscriber->safe_psql('postgres', - "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))"); +"CREATE TABLE tab_include (a int, 
b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))" +); # Setup logical replication my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; @@ -97,7 +99,8 @@ $node_publisher->safe_psql('postgres', $node_publisher->safe_psql('postgres', "INSERT INTO tab_include SELECT generate_series(1,50)"); -$node_publisher->safe_psql('postgres', "DELETE FROM tab_include WHERE a > 20"); +$node_publisher->safe_psql('postgres', + "DELETE FROM tab_include WHERE a > 20"); $node_publisher->safe_psql('postgres', "UPDATE tab_include SET a = -a"); $node_publisher->wait_for_catchup($appname); @@ -117,7 +120,8 @@ is( $result, qq(|foo|1 $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_include"); -is($result, qq(20|-20|-1), 'check replicated changes with primary key index with included columns'); +is($result, qq(20|-20|-1), + 'check replicated changes with primary key index with included columns'); # insert some duplicate rows $node_publisher->safe_psql('postgres', diff --git a/src/test/subscription/t/005_encoding.pl b/src/test/subscription/t/005_encoding.pl index 65439f1b28..020bffbbe9 100644 --- a/src/test/subscription/t/005_encoding.pl +++ b/src/test/subscription/t/005_encoding.pl @@ -34,7 +34,7 @@ $node_publisher->wait_for_catchup($appname); # Wait for initial sync to finish as well my $synced_query = - "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; +"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; $node_subscriber->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; diff --git a/src/test/subscription/t/006_rewrite.pl b/src/test/subscription/t/006_rewrite.pl index aa1184c85f..907a913318 100644 --- a/src/test/subscription/t/006_rewrite.pl +++ b/src/test/subscription/t/006_rewrite.pl @@ -30,18 +30,19 @@ $node_publisher->wait_for_catchup($appname); # Wait for initial sync to finish as well my $synced_query = - "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; +"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; $node_subscriber->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; -$node_publisher->safe_psql('postgres', q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');}); +$node_publisher->safe_psql('postgres', + q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');}); $node_publisher->wait_for_catchup($appname); -is($node_subscriber->safe_psql('postgres', q{SELECT a, b FROM test1}), - qq(1|one +is( $node_subscriber->safe_psql('postgres', q{SELECT a, b FROM test1}), + qq(1|one 2|two), - 'initial data replicated to subscriber'); + 'initial data replicated to subscriber'); # DDL that causes a heap rewrite my $ddl2 = "ALTER TABLE test1 ADD c int NOT NULL DEFAULT 0;"; @@ -50,15 +51,16 @@ $node_publisher->safe_psql('postgres', $ddl2); $node_publisher->wait_for_catchup($appname); -$node_publisher->safe_psql('postgres', q{INSERT INTO test1 (a, b, c) VALUES (3, 'three', 33);}); +$node_publisher->safe_psql('postgres', + q{INSERT INTO test1 (a, b, c) VALUES (3, 'three', 33);}); $node_publisher->wait_for_catchup($appname); -is($node_subscriber->safe_psql('postgres', q{SELECT a, b, c FROM test1}), - qq(1|one|0 +is( $node_subscriber->safe_psql('postgres', q{SELECT a, b, c FROM test1}), + qq(1|one|0 2|two|0 3|three|33), - 'data replicated to subscriber'); + 'data replicated to subscriber'); 
$node_subscriber->stop; $node_publisher->stop; diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl index b219bf33dd..97a927b078 100644 --- a/src/test/subscription/t/007_ddl.pl +++ b/src/test/subscription/t/007_ddl.pl @@ -28,7 +28,8 @@ $node_subscriber->safe_psql('postgres', $node_publisher->wait_for_catchup($appname); -$node_subscriber->safe_psql('postgres', q{ +$node_subscriber->safe_psql( + 'postgres', q{ BEGIN; ALTER SUBSCRIPTION mysub DISABLE; ALTER SUBSCRIPTION mysub SET (slot_name = NONE); diff --git a/src/test/subscription/t/008_diff_schema.pl b/src/test/subscription/t/008_diff_schema.pl index d4849c89a3..f2d5a09122 100644 --- a/src/test/subscription/t/008_diff_schema.pl +++ b/src/test/subscription/t/008_diff_schema.pl @@ -22,11 +22,14 @@ $node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')"); # Setup structure on subscriber -$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999, e int GENERATED BY DEFAULT AS IDENTITY)"); +$node_subscriber->safe_psql('postgres', +"CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999, e int GENERATED BY DEFAULT AS IDENTITY)" +); # Setup logical replication my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; -$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab"); +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION tap_pub FOR TABLE test_tab"); my $appname = 'tap_sub'; $node_subscriber->safe_psql('postgres', @@ -42,7 +45,8 @@ $node_subscriber->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; my $result = - $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab"); + $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(c), count(d = 999) FROM test_tab"); is($result, qq(2|2|2), 'check initial data was copied to subscriber'); # Update the rows on the publisher and check the additional columns on @@ -52,19 +56,25 @@ $node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = md5(b)"); $node_publisher->wait_for_catchup($appname); $result = - $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab"); -is($result, qq(2|2|2|2), 'check extra columns contain local defaults after copy'); + $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab"); +is($result, qq(2|2|2|2), + 'check extra columns contain local defaults after copy'); # Change the local values of the extra columns on the subscriber, # update publisher, and check that subscriber retains the expected # values -$node_subscriber->safe_psql('postgres', "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'"); -$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = md5(a::text)"); +$node_subscriber->safe_psql('postgres', + "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'" +); +$node_publisher->safe_psql('postgres', + "UPDATE test_tab SET b = md5(a::text)"); $node_publisher->wait_for_catchup($appname); -$result = - $node_subscriber->safe_psql('postgres', "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab"); +$result = $node_subscriber->safe_psql('postgres', +"SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) 
FROM test_tab" +); is($result, qq(2|2|2), 'check extra columns contain locally changed data'); # Another insert @@ -74,8 +84,10 @@ $node_publisher->safe_psql('postgres', $node_publisher->wait_for_catchup($appname); $result = - $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab"); -is($result, qq(3|3|3|3), 'check extra columns contain local defaults after apply'); + $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab"); +is($result, qq(3|3|3|3), + 'check extra columns contain local defaults after apply'); $node_subscriber->stop; $node_publisher->stop; diff --git a/src/test/subscription/t/009_matviews.pl b/src/test/subscription/t/009_matviews.pl index c55c62c95d..0fc2e3e353 100644 --- a/src/test/subscription/t/009_matviews.pl +++ b/src/test/subscription/t/009_matviews.pl @@ -22,10 +22,13 @@ $node_subscriber->safe_psql('postgres', "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;" ); -$node_publisher->safe_psql('postgres', q{CREATE TABLE test1 (a int PRIMARY KEY, b text)}); -$node_publisher->safe_psql('postgres', q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');}); +$node_publisher->safe_psql('postgres', + q{CREATE TABLE test1 (a int PRIMARY KEY, b text)}); +$node_publisher->safe_psql('postgres', + q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');}); -$node_subscriber->safe_psql('postgres', q{CREATE TABLE test1 (a int PRIMARY KEY, b text);}); +$node_subscriber->safe_psql('postgres', + q{CREATE TABLE test1 (a int PRIMARY KEY, b text);}); $node_publisher->wait_for_catchup($appname); @@ -34,8 +37,10 @@ $node_publisher->wait_for_catchup($appname); # need to make sure they are properly ignored. (bug #15044) # create a MV with some data -$node_publisher->safe_psql('postgres', q{CREATE MATERIALIZED VIEW testmv1 AS SELECT * FROM test1;}); +$node_publisher->safe_psql('postgres', + q{CREATE MATERIALIZED VIEW testmv1 AS SELECT * FROM test1;}); $node_publisher->wait_for_catchup($appname); + # There is no equivalent relation on the subscriber, but MV data is # not replicated, so this does not hang. 
diff --git a/src/test/subscription/t/010_truncate.pl b/src/test/subscription/t/010_truncate.pl
index fe9e0b9ec7..39f2cdb9cd 100644
--- a/src/test/subscription/t/010_truncate.pl
+++ b/src/test/subscription/t/010_truncate.pl
@@ -42,11 +42,8 @@ $node_subscriber->safe_psql('postgres',
 "CREATE TABLE tab4 (x int PRIMARY KEY, y int REFERENCES tab3)");
 $node_subscriber->safe_psql('postgres',
- "CREATE SEQUENCE seq1 OWNED BY tab1.a"
-);
-$node_subscriber->safe_psql('postgres',
- "ALTER SEQUENCE seq1 START 101"
-);
+ "CREATE SEQUENCE seq1 OWNED BY tab1.a");
+$node_subscriber->safe_psql('postgres', "ALTER SEQUENCE seq1 START 101");

 $node_publisher->safe_psql('postgres',
 "CREATE PUBLICATION pub1 FOR TABLE tab1");

@@ -55,11 +52,14 @@ $node_publisher->safe_psql('postgres',
 $node_publisher->safe_psql('postgres',
 "CREATE PUBLICATION pub3 FOR TABLE tab3, tab4");
 $node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr application_name=sub1' PUBLICATION pub1");
+"CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr application_name=sub1' PUBLICATION pub1"
+);
 $node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr application_name=sub2' PUBLICATION pub2");
+"CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr application_name=sub2' PUBLICATION pub2"
+);
 $node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub3 CONNECTION '$publisher_connstr application_name=sub3' PUBLICATION pub3");
+"CREATE SUBSCRIPTION sub3 CONNECTION '$publisher_connstr application_name=sub3' PUBLICATION pub3"
+);

 # Wait for initial sync of all subscriptions
 my $synced_query =
@@ -69,7 +69,8 @@ $node_subscriber->poll_query_until('postgres', $synced_query)

 # insert data to truncate

-$node_subscriber->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");

 $node_publisher->wait_for_catchup('sub1');

@@ -81,13 +82,10 @@ $node_publisher->wait_for_catchup('sub1');

 my $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(a), max(a) FROM tab1");
-is($result, qq(0||),
- 'truncate replicated');
+is($result, qq(0||), 'truncate replicated');

-$result = $node_subscriber->safe_psql('postgres',
- "SELECT nextval('seq1')");
-is($result, qq(1),
- 'sequence not restarted');
+$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')");
+is($result, qq(1), 'sequence not restarted');

 # truncate with restart identity

@@ -95,14 +93,13 @@ $node_publisher->safe_psql('postgres', "TRUNCATE tab1 RESTART IDENTITY");

 $node_publisher->wait_for_catchup('sub1');

-$result = $node_subscriber->safe_psql('postgres',
- "SELECT nextval('seq1')");
-is($result, qq(101),
- 'truncate restarted identities');
+$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')");
+is($result, qq(101), 'truncate restarted identities');

 # test publication that does not replicate truncate

-$node_subscriber->safe_psql('postgres', "INSERT INTO tab2 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (3)");

 $node_publisher->safe_psql('postgres', "TRUNCATE tab2");

@@ -110,8 +107,7 @@ $node_publisher->wait_for_catchup('sub2');

 $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(a), max(a) FROM tab2");
-is($result, qq(3|1|3),
- 'truncate not replicated');
+is($result, qq(3|1|3), 'truncate not replicated');

 $node_publisher->safe_psql('postgres',
 "ALTER PUBLICATION pub2 SET (publish = 'insert, truncate')");
@@ -122,13 +118,14 @@ $node_publisher->wait_for_catchup('sub2');

 $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(a), max(a) FROM tab2");
-is($result, qq(0||),
- 'truncate replicated after publication change');
+is($result, qq(0||), 'truncate replicated after publication change');

 # test multiple tables connected by foreign keys

-$node_subscriber->safe_psql('postgres', "INSERT INTO tab3 VALUES (1), (2), (3)");
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab4 VALUES (11, 1), (111, 1), (22, 2)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab3 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab4 VALUES (11, 1), (111, 1), (22, 2)");

 $node_publisher->safe_psql('postgres', "TRUNCATE tab3, tab4");

@@ -136,20 +133,20 @@ $node_publisher->wait_for_catchup('sub3');

 $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(a), max(a) FROM tab3");
-is($result, qq(0||),
- 'truncate of multiple tables replicated');
+is($result, qq(0||), 'truncate of multiple tables replicated');

 $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(x), max(x) FROM tab4");
-is($result, qq(0||),
- 'truncate of multiple tables replicated');
+is($result, qq(0||), 'truncate of multiple tables replicated');

 # test truncate of multiple tables, some of which are not published

 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION sub2");
 $node_publisher->safe_psql('postgres', "DROP PUBLICATION pub2");

-$node_subscriber->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (2), (3)");
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab2 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (3)");

 $node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2");
@@ -157,9 +154,7 @@ $node_publisher->wait_for_catchup('sub1');
 $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(a), max(a) FROM tab1");
-is($result, qq(0||),
- 'truncate of multiple tables some not published');
+is($result, qq(0||), 'truncate of multiple tables some not published');

 $result = $node_subscriber->safe_psql('postgres',
 "SELECT count(*), min(a), max(a) FROM tab2");
-is($result, qq(3|1|3),
- 'truncate of multiple tables some not published');
+is($result, qq(3|1|3), 'truncate of multiple tables some not published');
diff --git a/src/tools/git_changelog b/src/tools/git_changelog
index 2fc1565a6c..5c41efa701 100755
--- a/src/tools/git_changelog
+++ b/src/tools/git_changelog
@@ -102,9 +102,9 @@ my %rel_tags;
 {
 my $commit = $1;
 my $tag = $2;
- if ($tag =~ /^REL_\d+_\d+$/
- || $tag =~ /^REL\d+_\d+$/
- || $tag =~ /^REL\d+_\d+_\d+$/)
+ if ( $tag =~ /^REL_\d+_\d+$/
+ || $tag =~ /^REL\d+_\d+$/
+ || $tag =~ /^REL\d+_\d+_\d+$/)
 {
 $rel_tags{$commit} = $tag;
 }
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index 82528eaa28..578426994d 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -101,7 +101,7 @@ sub Install
 # Don't find files of in-tree temporary installations.
 $_ eq 'share' and $File::Find::prune = 1;
- }
+ }
 },
 @top_dir);

 CopySetOfFiles('config files', $sample_files, $target . '/share/');
@@ -135,8 +135,8 @@ sub Install
 'Information schema data', $target . '/share/',
 'src/backend/catalog/', 'sql_features.txt');
 CopyFiles(
- 'Error code data', $target . '/share/',
- 'src/backend/utils/', 'errcodes.txt');
+ 'Error code data', $target . '/share/',
+ 'src/backend/utils/', 'errcodes.txt');
 GenerateConversionScript($target);
 GenerateTimezoneFiles($target, $conf);
 GenerateTsearchFiles($target);
@@ -161,7 +161,7 @@ sub Install
 # Don't find files of in-tree temporary installations.
 $_ eq 'share' and $File::Find::prune = 1;
- }
+ }
 },
 @pldirs);

 CopySetOfFiles('PL Extension files',
@@ -693,7 +693,7 @@ sub GenerateNLSFiles
 {
 wanted => sub {
 /^nls\.mk\z/s && !push(@flist, $File::Find::name);
- }
+ }
 },
 "src");

 foreach (@flist)
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index 1d3ed6b0b1..d6d7b7a860 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -530,11 +530,12 @@ sub mkvcbuild
 my $perl_path = $solution->{options}->{perl} . '\lib\CORE\*perl*';

- # ActivePerl 5.16 provided perl516.lib; 5.18 provided libperl518.a
- # Starting with ActivePerl 5.24, both perlnn.lib and libperlnn.a are provided.
- # In this case, prefer .lib.
+# ActivePerl 5.16 provided perl516.lib; 5.18 provided libperl518.a
+# Starting with ActivePerl 5.24, both perlnn.lib and libperlnn.a are provided.
+# In this case, prefer .lib.
 my @perl_libs =
- reverse sort grep { /perl\d+\.lib$|libperl\d+\.a$/ } glob($perl_path);
+ reverse sort grep { /perl\d+\.lib$|libperl\d+\.a$/ }
+ glob($perl_path);
 if (@perl_libs > 0)
 {
 $plperl->AddLibrary($perl_libs[0]);
@@ -753,7 +754,7 @@ sub mkvcbuild
 'hstore', 'contrib/hstore');
 my $jsonb_plperl = AddTransformModule(
 'jsonb_plperl', 'contrib/jsonb_plperl',
- 'plperl', 'src/pl/plperl');
+ 'plperl', 'src/pl/plperl');

 foreach my $f (@perl_embed_ccflags)
 {
@@ -856,12 +857,12 @@ sub AddSimpleFrontend
 # Add a simple transform module
 sub AddTransformModule
 {
- my $n = shift;
- my $n_src = shift;
- my $pl_proj_name = shift;
- my $pl_src = shift;
- my $type_name = shift;
- my $type_src = shift;
+ my $n = shift;
+ my $n_src = shift;
+ my $pl_proj_name = shift;
+ my $pl_src = shift;
+ my $type_name = shift;
+ my $type_src = shift;
 my $type_proj = undef;

 if ($type_name)
@@ -995,7 +996,7 @@ sub GenerateContribSqlFiles
 print "Building $out from $in (contrib/$n)...\n";
 my $cont = Project::read_file("contrib/$n/$in");
 my $dn = $out;
- $dn =~ s/\.sql$//;
+ $dn =~ s/\.sql$//;
 $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
 my $o;
 open($o, '>', "contrib/$n/$out")
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 9817b9439a..7ccfd7bf2f 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -192,7 +192,7 @@ sub AddDir
 {
 next
 if $subdir eq "\$(top_builddir)/src/timezone"
- ; #special case for non-standard include
+ ; #special case for non-standard include
 next
 if $reldir . "/" . $subdir eq "src/backend/port/darwin";

diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index f953460523..696433d272 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -51,7 +51,7 @@ sub _new
 unless $options->{wal_blocksize}; # undef or 0 means default
 die "Bad wal_blocksize $options->{wal_blocksize}"
 unless grep { $_ == $options->{wal_blocksize} }
- (1, 2, 4, 8, 16, 32, 64);
+ (1, 2, 4, 8, 16, 32, 64);
 $options->{wal_segsize} = 16
 unless $options->{wal_segsize}; # undef or 0 means default
 die "Bad wal_segsize $options->{wal_segsize}"
@@ -176,8 +176,7 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c
 "\n";
 print $o "#define RELSEG_SIZE ",
 (1024 / $self->{options}->{blocksize}) *
- $self->{options}->{segsize} *
- 1024, "\n";
+ $self->{options}->{segsize} * 1024, "\n";
 print $o "#define XLOG_BLCKSZ ",
 1024 * $self->{options}->{wal_blocksize}, "\n";
@@ -266,17 +265,14 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c
 chdir('src/backend/utils');
 my $pg_language_dat = '../../../src/include/catalog/pg_language.dat';
- my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
- if (IsNewer(
- 'fmgrtab.c', $pg_language_dat)
- || IsNewer(
- 'fmgrtab.c', $pg_proc_dat)
- || IsNewer(
- 'fmgrtab.c', '../../../src/include/access/transam.h')
- )
+ my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
+ if ( IsNewer('fmgrtab.c', $pg_language_dat)
+ || IsNewer('fmgrtab.c', $pg_proc_dat)
+ || IsNewer('fmgrtab.c', '../../../src/include/access/transam.h'))
 {
 system(
-"perl -I ../catalog Gen_fmgrtab.pl -I../../../src/include/ $pg_language_dat $pg_proc_dat");
+"perl -I ../catalog Gen_fmgrtab.pl -I../../../src/include/ $pg_language_dat $pg_proc_dat"
+ );
 }
 chdir('../../..');
@@ -471,6 +467,7 @@ EOF
 $mf =~ /^POSTGRES_BKI_DATA\s*:?=[^,]+,(.*)\)$/gm
 || croak "Could not find POSTGRES_BKI_DATA in Makefile\n";
 my @bki_data = split /\s+/, $1;
+
 foreach my $bki (@bki_srcs, @bki_data)
 {
 next if $bki eq "";
@@ -480,19 +477,20 @@ EOF
 {
 chdir('src/backend/catalog');
 my $bki_srcs = join(' ../../../src/include/catalog/', @bki_srcs);
- system("perl genbki.pl --set-version=$self->{majorver} $bki_srcs");
+ system(
+ "perl genbki.pl --set-version=$self->{majorver} $bki_srcs");
 chdir('../../..');

 # Copy generated headers to include directory.
 opendir(my $dh, 'src/backend/catalog/')
 || die "Can't opendir src/backend/catalog/ $!";
- my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh);
+ my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh);
 closedir $dh;

 foreach my $def_header (@def_headers)
 {
 copyFile(
- "src/backend/catalog/$def_header",
- "src/include/catalog/$def_header");
+ "src/backend/catalog/$def_header",
+ "src/include/catalog/$def_header");
 }
 copyFile(
 'src/backend/catalog/schemapg.h',
diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm
index 2f3480a1f6..3e29089bae 100644
--- a/src/tools/msvc/VSObjectFactory.pm
+++ b/src/tools/msvc/VSObjectFactory.pm
@@ -53,8 +53,10 @@ sub CreateSolution
 {
 return new VS2015Solution(@_);
 }
- # visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
- elsif (($visualStudioVersion ge '14.10') or ($visualStudioVersion eq '15.00'))
+
+# visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
+ elsif (($visualStudioVersion ge '14.10')
+ or ($visualStudioVersion eq '15.00'))
 {
 return new VS2017Solution(@_);
 }
@@ -98,8 +100,10 @@ sub CreateProject
 {
 return new VC2015Project(@_);
 }
- # visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
- elsif (($visualStudioVersion ge '14.10') or ($visualStudioVersion eq '15.00'))
+
+# visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
+ elsif (($visualStudioVersion ge '14.10')
+ or ($visualStudioVersion eq '15.00'))
 {
 return new VC2017Project(@_);
 }
@@ -132,7 +136,8 @@ sub DetermineVisualStudioVersion
 sub _GetVisualStudioVersion
 {
 my ($major, $minor) = @_;
- # visual 2017 hasn't changed the nmake version to 15, so still using the older version for comparison.
+
+# visual 2017 hasn't changed the nmake version to 15, so still using the older version for comparison.
 if ($major > 14)
 {
 carp
diff --git a/src/tools/msvc/gendef.pl b/src/tools/msvc/gendef.pl
index 9b5bc081e1..806f1576c5 100644
--- a/src/tools/msvc/gendef.pl
+++ b/src/tools/msvc/gendef.pl
@@ -155,8 +155,8 @@ sub usage
 usage()
 unless scalar(@ARGV) == 2
- && ( ($ARGV[0] =~ /\\([^\\]+$)/)
- && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64'));
+ && ( ($ARGV[0] =~ /\\([^\\]+$)/)
+ && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64'));
 my $defname = uc $1;
 my $deffile = "$ARGV[0]/$defname.def";
 my $platform = $ARGV[1];
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index 2d6b67cedf..6838397948 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -254,13 +254,16 @@ sub plcheck
 {
 next unless -d "$dir/sql" && -d "$dir/expected";
 my $lang;
- if ($dir eq 'plpgsql/src') {
+ if ($dir eq 'plpgsql/src')
+ {
 $lang = 'plpgsql';
 }
- elsif ($dir eq 'tcl') {
+ elsif ($dir eq 'tcl')
+ {
 $lang = 'pltcl';
 }
- else {
+ else
+ {
 $lang = $dir;
 }
 if ($lang eq 'plpython')
diff --git a/src/tools/pginclude/pgcheckdefines b/src/tools/pginclude/pgcheckdefines
index aa7c9c2fc1..dd12feeca2 100755
--- a/src/tools/pginclude/pgcheckdefines
+++ b/src/tools/pginclude/pgcheckdefines
@@ -58,7 +58,7 @@ while (<$pipe>)
 chomp;
 push @hfiles, $_
 unless m|^src/include/port/|
- || m|^src/backend/port/\w+/|;
+ || m|^src/backend/port/\w+/|;
 }
 close $pipe or die "$FIND failed: $!";
diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent
index 06a38261c6..2bbbd7b850 100755
--- a/src/tools/pgindent/pgindent
+++ b/src/tools/pgindent/pgindent
@@ -59,9 +59,8 @@ $excludes ||= "$code_base/src/tools/pgindent/exclude_file_patterns"
 # easier to configure. Note that the typedefs need trailing newlines.
 my @whitelist = ("bool\n");

-my %blacklist = map { +"$_\n" => 1 }
- qw( FD_SET date interval timestamp ANY
- abs allocfunc iterator other pointer printfunc reference string type );
+my %blacklist = map { +"$_\n" => 1 } qw( FD_SET date interval timestamp ANY
+ abs allocfunc iterator other pointer printfunc reference string type );

 # globals
 my @files;
@@ -135,7 +134,7 @@ sub load_typedefs
 push(@typedefs, @whitelist);

 # remove blacklisted entries
- @typedefs = grep { ! $blacklist{$_} } @typedefs;
+ @typedefs = grep { !$blacklist{$_} } @typedefs;

 # write filtered typedefs
 my $filter_typedefs_fh = new File::Temp(TEMPLATE => "pgtypedefXXXXX");
@@ -394,7 +393,7 @@ File::Find::find(
 && -f _
 && /^.*\.[ch]\z/s
 && push(@files, $File::Find::name);
- }
+ }
 },
 $code_base) if $code_base;
-- 
2.40.0
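For anyone wanting to reproduce or spot-check a reindentation run like the one in this patch, the sketch below shows one way to apply a pinned perltidy to a tree of Perl files. It is only an illustration, not the project's own wrapper tooling: the profile argument, the version check, and the .pl/.pm-only file discovery are assumptions made for this example (a complete run would also have to pick up extensionless Perl scripts).

#!/usr/bin/perl

# Minimal sketch, not the project's wrapper script: reindent Perl files
# under a directory with a pinned perltidy, driven by a caller-supplied
# profile (settings) file.
use strict;
use warnings;
use File::Find;

my $profile = shift @ARGV
  or die "usage: $0 path/to/perltidyrc [directory]\n";
my $root = shift @ARGV // '.';

# Collect candidate Perl files (assumption: .pl/.pm extensions only),
# using the same File::Find style seen in the files touched above.
my @files;
File::Find::find(
    {
        wanted => sub {
            /\.(?:pl|pm)\z/s && -f $_ && push(@files, $File::Find::name);
        }
    },
    $root);
die "no Perl files found under $root\n" unless @files;

# Refuse to run with any perltidy other than 20170521, since different
# releases lay out the same code differently and would create churn.
my ($version) = `perltidy --version` =~ /v?(\d{8})/;
die "expected perltidy 20170521, found " . ($version // 'none') . "\n"
  unless defined $version && $version eq '20170521';

# --profile points perltidy at the supplied settings file; -b rewrites
# each file in place, leaving a .bak copy behind.
system('perltidy', "--profile=$profile", '-b', @files) == 0
  or die "perltidy failed: $?\n";

Pinning the tool version is the important part: whitespace-only hunks like the ones above are exactly what appears when the same sources are re-run through a newer or older perltidy.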