Discussion: https://postgr.es/m/CABUevEzK3cNiHZQ18f5tK0guoT+cN_jWeVzhYYxY=r+1Q3SmoA@mail.gmail.com
$is_supported eq $yesno || next;
- $feature_name =~ s/</&lt;/g;
- $feature_name =~ s/>/&gt;/g;
+ $feature_name =~ s/</&lt;/g;
+ $feature_name =~ s/>/&gt;/g;
$subfeature_name =~ s/</&lt;/g;
$subfeature_name =~ s/>/&gt;/g;
'TransactionId' => 'xid',
'XLogRecPtr' => 'pg_lsn');
- my %catalog;
- my $declaring_attributes = 0;
- my $is_varlen = 0;
- my $is_client_code = 0;
+ my %catalog;
+ my $declaring_attributes = 0;
+ my $is_varlen = 0;
+ my $is_client_code = 0;
- $catalog{columns} = [];
- $catalog{toasting} = [];
- $catalog{indexing} = [];
- $catalog{client_code} = [];
+ $catalog{columns} = [];
+ $catalog{toasting} = [];
+ $catalog{indexing} = [];
+ $catalog{client_code} = [];
- open(my $ifh, '<', $input_file) || die "$input_file: $!";
+ open(my $ifh, '<', $input_file) || die "$input_file: $!";
- # Scan the input file.
- while (<$ifh>)
- {
+ # Scan the input file.
+ while (<$ifh>)
+ {
- # Set appropriate flag when we're in certain code sections.
- if (/^#/)
+ # Set appropriate flag when we're in certain code sections.
+ if (/^#/)
+ {
+ $is_varlen = 1 if /^#ifdef\s+CATALOG_VARLEN/;
+ if (/^#ifdef\s+EXPOSE_TO_CLIENT_CODE/)
{
- $is_varlen = 1 if /^#ifdef\s+CATALOG_VARLEN/;
- if (/^#ifdef\s+EXPOSE_TO_CLIENT_CODE/)
- {
- $is_client_code = 1;
- next;
- }
- next if !$is_client_code;
+ $is_client_code = 1;
+ next;
}
+ next if !$is_client_code;
+ }
- if (!$is_client_code)
+ if (!$is_client_code)
+ {
+ # Strip C-style comments.
+ s;/\*(.|\n)*\*/;;g;
+ if (m;/\*;)
{
- # Strip C-style comments.
- s;/\*(.|\n)*\*/;;g;
- if (m;/\*;)
- {
-
- # handle multi-line comments properly.
- my $next_line = <$ifh>;
- die "$input_file: ends within C-style comment\n"
- if !defined $next_line;
- $_ .= $next_line;
- redo;
- }
- # Strip useless whitespace and trailing semicolons.
- chomp;
- s/^\s+//;
- s/;\s*$//;
- s/\s+/ /g;
+ # handle multi-line comments properly.
+ my $next_line = <$ifh>;
+ die "$input_file: ends within C-style comment\n"
+ if !defined $next_line;
+ $_ .= $next_line;
+ redo;
}
- # Push the data into the appropriate data structure.
- if (/^DECLARE_TOAST\(\s*(\w+),\s*(\d+),\s*(\d+)\)/)
+ # Strip useless whitespace and trailing semicolons.
+ chomp;
+ s/^\s+//;
+ s/;\s*$//;
+ s/\s+/ /g;
+ }
+
+ # Push the data into the appropriate data structure.
+ if (/^DECLARE_TOAST\(\s*(\w+),\s*(\d+),\s*(\d+)\)/)
+ {
+ my ($toast_name, $toast_oid, $index_oid) = ($1, $2, $3);
+ push @{ $catalog{toasting} },
+ "declare toast $toast_oid $index_oid on $toast_name\n";
+ }
+ elsif (/^DECLARE_(UNIQUE_)?INDEX\(\s*(\w+),\s*(\d+),\s*(.+)\)/)
+ {
+ my ($is_unique, $index_name, $index_oid, $using) =
+ ($1, $2, $3, $4);
+ push @{ $catalog{indexing} },
+ sprintf(
+ "declare %sindex %s %s %s\n",
+ $is_unique ? 'unique ' : '',
+ $index_name, $index_oid, $using);
+ }
+ elsif (/^BUILD_INDICES/)
+ {
+ push @{ $catalog{indexing} }, "build indices\n";
+ }
+ elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/)
+ {
+ $catalog{catname} = $1;
+ $catalog{relation_oid} = $2;
+ $catalog{relation_oid_macro} = $3;
+
+ $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : '';
+ $catalog{shared_relation} =
+ /BKI_SHARED_RELATION/ ? ' shared_relation' : '';
+ $catalog{without_oids} =
+ /BKI_WITHOUT_OIDS/ ? ' without_oids' : '';
+ if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/)
{
- my ($toast_name, $toast_oid, $index_oid) = ($1, $2, $3);
- push @{ $catalog{toasting} },
- "declare toast $toast_oid $index_oid on $toast_name\n";
+ $catalog{rowtype_oid} = $1;
+ $catalog{rowtype_oid_clause} = " rowtype_oid $1";
+ $catalog{rowtype_oid_macro} = $2;
}
- elsif (/^DECLARE_(UNIQUE_)?INDEX\(\s*(\w+),\s*(\d+),\s*(.+)\)/)
+ else
{
- my ($is_unique, $index_name, $index_oid, $using) =
- ($1, $2, $3, $4);
- push @{ $catalog{indexing} },
- sprintf(
- "declare %sindex %s %s %s\n",
- $is_unique ? 'unique ' : '',
- $index_name, $index_oid, $using);
+ $catalog{rowtype_oid} = '';
+ $catalog{rowtype_oid_clause} = '';
+ $catalog{rowtype_oid_macro} = '';
}
- elsif (/^BUILD_INDICES/)
+ $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0;
+ $declaring_attributes = 1;
+ }
+ elsif ($is_client_code)
+ {
+ if (/^#endif/)
{
- push @{ $catalog{indexing} }, "build indices\n";
+ $is_client_code = 0;
}
- elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/)
+ else
{
- $catalog{catname} = $1;
- $catalog{relation_oid} = $2;
- $catalog{relation_oid_macro} = $3;
-
- $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : '';
- $catalog{shared_relation} =
- /BKI_SHARED_RELATION/ ? ' shared_relation' : '';
- $catalog{without_oids} =
- /BKI_WITHOUT_OIDS/ ? ' without_oids' : '';
- if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/)
- {
- $catalog{rowtype_oid} = $1;
- $catalog{rowtype_oid_clause} = " rowtype_oid $1";
- $catalog{rowtype_oid_macro} = $2;
- }
- else
- {
- $catalog{rowtype_oid} = '';
- $catalog{rowtype_oid_clause} = '';
- $catalog{rowtype_oid_macro} = '';
- }
- $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0;
- $declaring_attributes = 1;
+ push @{ $catalog{client_code} }, $_;
}
- elsif ($is_client_code)
+ }
+ elsif ($declaring_attributes)
+ {
+ next if (/^{|^$/);
+ if (/^}/)
{
- if (/^#endif/)
- {
- $is_client_code = 0;
- }
- else
- {
- push @{ $catalog{client_code} }, $_;
- }
+ $declaring_attributes = 0;
}
- elsif ($declaring_attributes)
+ else
{
- next if (/^{|^$/);
- if (/^}/)
+ my %column;
+ my @attopts = split /\s+/, $_;
+ my $atttype = shift @attopts;
+ my $attname = shift @attopts;
+ die "parse error ($input_file)"
+ unless ($attname and $atttype);
+
+ if (exists $RENAME_ATTTYPE{$atttype})
{
- $declaring_attributes = 0;
+ $atttype = $RENAME_ATTTYPE{$atttype};
}
- else
+
+ # If the C name ends with '[]' or '[digits]', we have
+ # an array type, so we discard that from the name and
+ # prepend '_' to the type.
+ if ($attname =~ /(\w+)\[\d*\]/)
{
- my %column;
- my @attopts = split /\s+/, $_;
- my $atttype = shift @attopts;
- my $attname = shift @attopts;
- die "parse error ($input_file)"
- unless ($attname and $atttype);
-
- if (exists $RENAME_ATTTYPE{$atttype})
+ $attname = $1;
+ $atttype = '_' . $atttype;
+ }
+
+ $column{type} = $atttype;
+ $column{name} = $attname;
+ $column{is_varlen} = 1 if $is_varlen;
+
+ foreach my $attopt (@attopts)
+ {
+ if ($attopt eq 'BKI_FORCE_NULL')
{
- $atttype = $RENAME_ATTTYPE{$atttype};
+ $column{forcenull} = 1;
}
-
- # If the C name ends with '[]' or '[digits]', we have
- # an array type, so we discard that from the name and
- # prepend '_' to the type.
- if ($attname =~ /(\w+)\[\d*\]/)
+ elsif ($attopt eq 'BKI_FORCE_NOT_NULL')
{
- $attname = $1;
- $atttype = '_' . $atttype;
+ $column{forcenotnull} = 1;
}
- $column{type} = $atttype;
- $column{name} = $attname;
- $column{is_varlen} = 1 if $is_varlen;
+ # We use quotes for values like \0 and \054, to
+ # make sure all compilers and syntax highlighters
+ # can recognize them properly.
+ elsif ($attopt =~ /BKI_DEFAULT\(['"]?([^'"]+)['"]?\)/)
+ {
+ $column{default} = $1;
+ }
+ elsif ($attopt =~ /BKI_LOOKUP\((\w+)\)/)
+ {
+ $column{lookup} = $1;
+ }
+ else
+ {
+ die
+ "unknown column option $attopt on column $attname";
+ }
- foreach my $attopt (@attopts)
+ if ($column{forcenull} and $column{forcenotnull})
{
- if ($attopt eq 'BKI_FORCE_NULL')
- {
- $column{forcenull} = 1;
- }
- elsif ($attopt eq 'BKI_FORCE_NOT_NULL')
- {
- $column{forcenotnull} = 1;
- }
- # We use quotes for values like \0 and \054, to
- # make sure all compilers and syntax highlighters
- # can recognize them properly.
- elsif ($attopt =~ /BKI_DEFAULT\(['"]?([^'"]+)['"]?\)/)
- {
- $column{default} = $1;
- }
- elsif ($attopt =~ /BKI_LOOKUP\((\w+)\)/)
- {
- $column{lookup} = $1;
- }
- else
- {
- die
-"unknown column option $attopt on column $attname";
- }
-
- if ($column{forcenull} and $column{forcenotnull})
- {
- die "$attname is forced both null and not null";
- }
+ die "$attname is forced both null and not null";
}
- push @{ $catalog{columns} }, \%column;
}
+ push @{ $catalog{columns} }, \%column;
}
}
- close $ifh;
+ }
+ close $ifh;
return \%catalog;
}
$input_file =~ /(\w+)\.dat$/
or die "Input file $input_file needs to be a .dat file.\n";
my $catname = $1;
- my $data = [];
+ my $data = [];
# Scan the input file.
while (<$ifd>)
{
$row->{$attname} = $column->{default};
}
- elsif ($catname eq 'pg_proc' && $attname eq 'pronargs' &&
- defined($row->{proargtypes}))
+ elsif ($catname eq 'pg_proc'
+ && $attname eq 'pronargs'
+ && defined($row->{proargtypes}))
{
# pg_proc.pronargs can be derived from proargtypes.
my @proargtypes = split /\s+/, $row->{proargtypes};
if (@missing_fields)
{
die sprintf "missing values for field(s) %s in %s.dat line %s\n",
- join(', ', @missing_fields), $catname, $row->{line_number};
+ join(', ', @missing_fields), $catname, $row->{line_number};
}
}
sub FindDefinedSymbolFromData
{
my ($data, $symbol) = @_;
- foreach my $row (@{ $data })
+ foreach my $row (@{$data})
{
if ($row->{oid_symbol} eq $symbol)
{
# within a given Postgres release, such as fixed OIDs. Do not substitute
# anything that could depend on platform or configuration. (The right place
# to handle those sorts of things is in initdb.c's bootstrap_template1().)
-my $BOOTSTRAP_SUPERUSERID = Catalog::FindDefinedSymbolFromData(
- $catalog_data{pg_authid}, 'BOOTSTRAP_SUPERUSERID');
-my $PG_CATALOG_NAMESPACE = Catalog::FindDefinedSymbolFromData(
- $catalog_data{pg_namespace}, 'PG_CATALOG_NAMESPACE');
+my $BOOTSTRAP_SUPERUSERID =
+ Catalog::FindDefinedSymbolFromData($catalog_data{pg_authid},
+ 'BOOTSTRAP_SUPERUSERID');
+my $PG_CATALOG_NAMESPACE =
+ Catalog::FindDefinedSymbolFromData($catalog_data{pg_namespace},
+ 'PG_CATALOG_NAMESPACE');
# Build lookup tables for OID macro substitutions and for pg_attribute
{
# There is no unique name, so we need to combine access method
# and opclass name.
- my $key = sprintf "%s/%s",
- $row->{opcmethod}, $row->{opcname};
+ my $key = sprintf "%s/%s", $row->{opcmethod}, $row->{opcname};
$opcoids{$key} = $row->{oid};
}
{
# There is no unique name, so we need to combine access method
# and opfamily name.
- my $key = sprintf "%s/%s",
- $row->{opfmethod}, $row->{opfname};
+ my $key = sprintf "%s/%s", $row->{opfmethod}, $row->{opfname};
$opfoids{$key} = $row->{oid};
}
{
$procoids{$prokey} = $row->{oid};
}
+
# Also generate an entry using proname(proargtypes). This is not quite
# identical to regprocedure lookup because we don't worry much about
# special SQL names for types etc; we just use the names in the source
foreach my $row (@{ $catalog_data{pg_type} })
{
$typeoids{ $row->{typname} } = $row->{oid};
- $types{ $row->{typname} } = $row;
+ $types{ $row->{typname} } = $row;
}
# Map catalog name to OID lookup.
pg_operator => \%operoids,
pg_opfamily => \%opfoids,
pg_proc => \%procoids,
- pg_type => \%typeoids
-);
+ pg_type => \%typeoids);
# Generate postgres.bki, postgres.description, postgres.shdescription,
# Complain about unrecognized keys; they are presumably misspelled
foreach my $key (keys %bki_values)
{
- next if $key eq "oid" || $key eq "oid_symbol" || $key eq "descr"
- || $key eq "line_number";
+ next
+ if $key eq "oid"
+ || $key eq "oid_symbol"
+ || $key eq "descr"
+ || $key eq "line_number";
die sprintf "unrecognized field name \"%s\" in %s.dat line %s\n",
- $key, $catname, $bki_values{line_number}
- if (!exists($attnames{$key}));
+ $key, $catname, $bki_values{line_number}
+ if (!exists($attnames{$key}));
}
# Perform required substitutions on fields
if ($atttype eq 'oidvector')
{
@lookupnames = split /\s+/, $bki_values{$attname};
- @lookupoids = lookup_oids($lookup, $catname,
- \%bki_values, @lookupnames);
+ @lookupoids = lookup_oids($lookup, $catname, \%bki_values,
+ @lookupnames);
$bki_values{$attname} = join(' ', @lookupoids);
}
elsif ($atttype eq '_oid')
{
$bki_values{$attname} =~ s/[{}]//g;
@lookupnames = split /,/, $bki_values{$attname};
- @lookupoids = lookup_oids($lookup, $catname,
- \%bki_values, @lookupnames);
- $bki_values{$attname} =
- sprintf "{%s}", join(',', @lookupoids);
+ @lookupoids =
+ lookup_oids($lookup, $catname, \%bki_values,
+ @lookupnames);
+ $bki_values{$attname} = sprintf "{%s}",
+ join(',', @lookupoids);
}
}
else
{
$lookupnames[0] = $bki_values{$attname};
- @lookupoids = lookup_oids($lookup, $catname,
- \%bki_values, @lookupnames);
+ @lookupoids = lookup_oids($lookup, $catname, \%bki_values,
+ @lookupnames);
$bki_values{$attname} = $lookupoids[0];
}
}
morph_row_for_schemapg(\%row, $schema);
push @{ $schemapg_entries{$table_name} },
sprintf "{ %s }",
- join(', ', grep { defined $_ } @row{@attnames});
+ join(', ', grep { defined $_ } @row{@attnames});
}
# Generate entries for system attributes.
# Omit the oid column if the catalog doesn't have them
next
if $table->{without_oids}
- && $attr->{name} eq 'oid';
+ && $attr->{name} eq 'oid';
morph_row_for_pgattr(\%row, $schema, $attr, 1);
print_bki_insert(\%row, $schema);
# compare DefineAttr in bootstrap.c. oidvector and
# int2vector are also treated as not-nullable.
$row->{attnotnull} =
- $type->{typname} eq 'oidvector' ? 't'
- : $type->{typname} eq 'int2vector' ? 't'
- : $type->{typlen} eq 'NAMEDATALEN' ? 't'
- : $type->{typlen} > 0 ? 't'
- : 'f';
+ $type->{typname} eq 'oidvector' ? 't'
+ : $type->{typname} eq 'int2vector' ? 't'
+ : $type->{typlen} eq 'NAMEDATALEN' ? 't'
+ : $type->{typlen} > 0 ? 't'
+ : 'f';
}
else
{
# the "id" pattern in bootscanner.l, currently "[-A-Za-z0-9_]+".
$bki_value = sprintf(qq'"%s"', $bki_value)
if length($bki_value) == 0
- or $bki_value =~ /[^-A-Za-z0-9_]/;
+ or $bki_value =~ /[^-A-Za-z0-9_]/;
push @bki_values, $bki_value;
}
# don't change.
elsif ($atttype eq 'bool')
{
- $row->{$attname} = 'true' if $row->{$attname} eq 't';
+ $row->{$attname} = 'true' if $row->{$attname} eq 't';
$row->{$attname} = 'false' if $row->{$attname} eq 'f';
}
else
{
push @lookupoids, $lookupname;
- warn sprintf "unresolved OID reference \"%s\" in %s.dat line %s\n",
- $lookupname, $catname, $bki_values->{line_number}
- if $lookupname ne '-' and $lookupname ne '0';
+ warn sprintf
+ "unresolved OID reference \"%s\" in %s.dat line %s\n",
+ $lookupname, $catname, $bki_values->{line_number}
+ if $lookupname ne '-' and $lookupname ne '0';
}
}
return @lookupoids;
# Skip for rowtypes of bootstrap catalogs, since they have their
# own naming convention defined elsewhere.
return
- if $typename eq 'pg_type'
- or $typename eq 'pg_proc'
- or $typename eq 'pg_attribute'
- or $typename eq 'pg_class';
+ if $typename eq 'pg_type'
+ or $typename eq 'pg_proc'
+ or $typename eq 'pg_attribute'
+ or $typename eq 'pg_class';
# Transform like so:
# foo_bar -> FOO_BAROID
my $header = "$1.h";
die "There in no header file corresponding to $datfile"
- if ! -e $header;
+ if !-e $header;
my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname};
}
# Fetch some values for later.
-my $FirstBootstrapObjectId = Catalog::FindDefinedSymbol(
- 'access/transam.h', \@include_path, 'FirstBootstrapObjectId');
-my $INTERNALlanguageId = Catalog::FindDefinedSymbolFromData(
- $catalog_data{pg_language}, 'INTERNALlanguageId');
+my $FirstBootstrapObjectId =
+ Catalog::FindDefinedSymbol('access/transam.h', \@include_path,
+ 'FirstBootstrapObjectId');
+my $INTERNALlanguageId =
+ Catalog::FindDefinedSymbolFromData($catalog_data{pg_language},
+ 'INTERNALlanguageId');
print "Generating fmgrtab.c, fmgroids.h, and fmgrprotos.h...\n";
print $tfh
" { $s->{oid}, \"$s->{prosrc}\", $s->{nargs}, $bmap{$s->{strict}}, $bmap{$s->{retset}}, $s->{prosrc} }";
- $fmgr_builtin_oid_index[$s->{oid}] = $fmgr_count++;
+ $fmgr_builtin_oid_index[ $s->{oid} ] = $fmgr_count++;
if ($fmgr_count <= $#fmgr)
{
'successful creation');
# Permissions on PGDATA should be default
- SKIP:
+ SKIP:
{
- skip "unix-style permissions not supported on Windows", 1 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 1
+ if ($windows_os);
ok(check_mode_recursive($datadir, 0700, 0600),
- "check PGDATA permissions");
+ "check PGDATA permissions");
}
}
command_ok([ 'initdb', '-S', $datadir ], 'sync only');
# Check group access on PGDATA
SKIP:
{
- skip "unix-style permissions not supported on Windows", 2 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 2
+ if ($windows_os);
# Init a new db with group access
my $datadir_group = "$tempdir/data_group";
my $tempdir = TestLib::tempdir;
my @walfiles = (
- '00000001000000370000000C.gz',
- '00000001000000370000000D',
- '00000001000000370000000E',
- '00000001000000370000000F.partial',
-);
+ '00000001000000370000000C.gz', '00000001000000370000000D',
+ '00000001000000370000000E', '00000001000000370000000F.partial',);
sub create_files
{
create_files();
-command_fails_like(['pg_archivecleanup'],
- qr/must specify archive location/,
- 'fails if archive location is not specified');
+command_fails_like(
+ ['pg_archivecleanup'],
+ qr/must specify archive location/,
+ 'fails if archive location is not specified');
-command_fails_like(['pg_archivecleanup', $tempdir],
- qr/must specify oldest kept WAL file/,
- 'fails if oldest kept WAL file name is not specified');
+command_fails_like(
+ [ 'pg_archivecleanup', $tempdir ],
+ qr/must specify oldest kept WAL file/,
+ 'fails if oldest kept WAL file name is not specified');
-command_fails_like(['pg_archivecleanup', 'notexist', 'foo'],
- qr/archive location .* does not exist/,
- 'fails if archive location does not exist');
+command_fails_like(
+ [ 'pg_archivecleanup', 'notexist', 'foo' ],
+ qr/archive location .* does not exist/,
+ 'fails if archive location does not exist');
-command_fails_like(['pg_archivecleanup', $tempdir, 'foo', 'bar'],
- qr/too many command-line arguments/,
- 'fails with too many command-line arguments');
+command_fails_like(
+ [ 'pg_archivecleanup', $tempdir, 'foo', 'bar' ],
+ qr/too many command-line arguments/,
+ 'fails with too many command-line arguments');
-command_fails_like(['pg_archivecleanup', $tempdir, 'foo'],
- qr/invalid file name argument/,
- 'fails with invalid restart file name');
+command_fails_like(
+ [ 'pg_archivecleanup', $tempdir, 'foo' ],
+ qr/invalid file name argument/,
+ 'fails with invalid restart file name');
{
# like command_like but checking stderr
my $stderr;
- my $result = IPC::Run::run ['pg_archivecleanup', '-d', '-n', $tempdir, $walfiles[2]], '2>', \$stderr;
+ my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
+ $walfiles[2] ], '2>', \$stderr;
ok($result, "pg_archivecleanup dry run: exit code 0");
- like($stderr, qr/$walfiles[1].*would be removed/, "pg_archivecleanup dry run: matches");
+ like(
+ $stderr,
+ qr/$walfiles[1].*would be removed/,
+ "pg_archivecleanup dry run: matches");
foreach my $fn (@walfiles)
{
ok(-f "$tempdir/$fn", "$fn not removed");
create_files();
- command_ok(['pg_archivecleanup', '-x', '.gz', $tempdir, $walfiles[2] . $suffix],
- "$test_name: runs");
-
- ok(! -f "$tempdir/$walfiles[0]", "$test_name: first older WAL file was cleaned up");
- ok(! -f "$tempdir/$walfiles[1]", "$test_name: second older WAL file was cleaned up");
- ok(-f "$tempdir/$walfiles[2]", "$test_name: restartfile was not cleaned up");
- ok(-f "$tempdir/$walfiles[3]", "$test_name: newer WAL file was not cleaned up");
- ok(-f "$tempdir/unrelated_file", "$test_name: unrelated file was not cleaned up");
+ command_ok(
+ [ 'pg_archivecleanup', '-x', '.gz', $tempdir,
+ $walfiles[2] . $suffix ],
+ "$test_name: runs");
+
+ ok(!-f "$tempdir/$walfiles[0]",
+ "$test_name: first older WAL file was cleaned up");
+ ok(!-f "$tempdir/$walfiles[1]",
+ "$test_name: second older WAL file was cleaned up");
+ ok(-f "$tempdir/$walfiles[2]",
+ "$test_name: restartfile was not cleaned up");
+ ok(-f "$tempdir/$walfiles[3]",
+ "$test_name: newer WAL file was not cleaned up");
+ ok(-f "$tempdir/unrelated_file",
+ "$test_name: unrelated file was not cleaned up");
}
-run_check('', 'pg_archivecleanup');
-run_check('.partial', 'pg_archivecleanup with .partial file');
+run_check('', 'pg_archivecleanup');
+run_check('.partial', 'pg_archivecleanup with .partial file');
run_check('.00000020.backup', 'pg_archivecleanup with .backup file');
umask(0077);
# Initialize node without replication settings
-$node->init(extra => [ '--data-checksums' ]);
+$node->init(extra => ['--data-checksums']);
$node->start;
my $pgdata = $node->data_dir;
# Create a backup directory that is not empty so the next commnd will fail
# but leave the data directory behind
mkdir("$tempdir/backup")
- or BAIL_OUT("unable to create $tempdir/backup");
+ or BAIL_OUT("unable to create $tempdir/backup");
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");
$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
# Make sure main and init forks exist
ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
-ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
+ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
# Create files that look like temporary relations to ensure they are ignored.
my $postgresOid = $node->safe_psql('postgres',
q{select oid from pg_database where datname = 'postgres'});
-my @tempRelationFiles = qw(t999_999 t9999_999.1 t999_9999_vm t99999_99999_vm.1);
+my @tempRelationFiles =
+ qw(t999_999 t9999_999.1 t999_9999_vm t99999_99999_vm.1);
foreach my $filename (@tempRelationFiles)
{
# Permissions on backup should be default
SKIP:
{
- skip "unix-style permissions not supported on Windows", 1 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 1
+ if ($windows_os);
ok(check_mode_recursive("$tempdir/backup", 0700, 0600),
- "check backup dir permissions");
+ "check backup dir permissions");
}
# Only archive_status directory should be copied in pg_wal/.
# These files should not be copied.
foreach my $filename (
qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp
- global/pg_internal.init)
- )
+ global/pg_internal.init))
{
ok(!-f "$tempdir/backup/$filename", "$filename not copied");
}
# Unlogged relation forks other than init should not be copied
ok(-f "$tempdir/backup/${baseUnloggedPath}_init",
'unlogged init fork in backup');
-ok(!-f "$tempdir/backup/$baseUnloggedPath",
+ok( !-f "$tempdir/backup/$baseUnloggedPath",
'unlogged main fork not in backup');
# Temp relations should not be copied.
foreach my $filename (@tempRelationFiles)
{
- ok(!-f "$tempdir/backup/base/$postgresOid/$filename",
- "base/$postgresOid/$filename not copied");
+ ok( !-f "$tempdir/backup/base/$postgresOid/$filename",
+ "base/$postgresOid/$filename not copied");
}
# Make sure existing backup_label was ignored.
# Move pg_replslot out of $pgdata and create a symlink to it.
$node->stop;
- # Set umask so test directories and files are created with group permissions
+ # Set umask so test directories and files are created with group permissions
umask(0027);
# Enable group permissions on PGDATA
is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
rmtree("$tempdir/tarbackup2");
- # Create an unlogged table to test that forks other than init are not copied.
+ # Create an unlogged table to test that forks other than init are not copied.
$node->safe_psql('postgres',
- 'CREATE UNLOGGED TABLE tblspc1_unlogged (id int) TABLESPACE tblspc1;');
+ 'CREATE UNLOGGED TABLE tblspc1_unlogged (id int) TABLESPACE tblspc1;'
+ );
- my $tblspc1UnloggedPath = $node->safe_psql(
- 'postgres', q{select pg_relation_filepath('tblspc1_unlogged')});
+ my $tblspc1UnloggedPath = $node->safe_psql('postgres',
+ q{select pg_relation_filepath('tblspc1_unlogged')});
# Make sure main and init forks exist
- ok(-f "$pgdata/${tblspc1UnloggedPath}_init",
+ ok( -f "$pgdata/${tblspc1UnloggedPath}_init",
'unlogged init fork in tablespace');
- ok(-f "$pgdata/$tblspc1UnloggedPath",
- 'unlogged main fork in tablespace');
+ ok(-f "$pgdata/$tblspc1UnloggedPath", 'unlogged main fork in tablespace');
- # Create files that look like temporary relations to ensure they are ignored
- # in a tablespace.
+ # Create files that look like temporary relations to ensure they are ignored
+ # in a tablespace.
my @tempRelationFiles = qw(t888_888 t888888_888888_vm.1);
- my $tblSpc1Id = basename(dirname(dirname($node->safe_psql('postgres',
- q{select pg_relation_filepath('test1')}))));
+ my $tblSpc1Id = basename(
+ dirname(
+ dirname(
+ $node->safe_psql(
+ 'postgres', q{select pg_relation_filepath('test1')}))));
foreach my $filename (@tempRelationFiles)
{
-l "$tempdir/backup1/pg_tblspc/$_"
and readlink "$tempdir/backup1/pg_tblspc/$_" eq
"$tempdir/tbackup/tblspc1"
- } readdir($dh)),
+ } readdir($dh)),
"tablespace symlink was updated");
closedir $dh;
# Group access should be enabled on all backup files
ok(check_mode_recursive("$tempdir/backup1", 0750, 0640),
- "check backup dir permissions");
+ "check backup dir permissions");
# Unlogged relation forks other than init should not be copied
- my ($tblspc1UnloggedBackupPath) = $tblspc1UnloggedPath =~ /[^\/]*\/[^\/]*\/[^\/]*$/g;
+ my ($tblspc1UnloggedBackupPath) =
+ $tblspc1UnloggedPath =~ /[^\/]*\/[^\/]*\/[^\/]*$/g;
ok(-f "$tempdir/tbackup/tblspc1/${tblspc1UnloggedBackupPath}_init",
'unlogged init fork in tablespace backup');
# Temp relations should not be copied.
foreach my $filename (@tempRelationFiles)
{
- ok(!-f "$tempdir/tbackup/tblspc1/$tblSpc1Id/$postgresOid/$filename",
- "[tblspc1]/$postgresOid/$filename not copied");
+ ok( !-f "$tempdir/tbackup/tblspc1/$tblSpc1Id/$postgresOid/$filename",
+ "[tblspc1]/$postgresOid/$filename not copied");
# Also remove temp relation files or tablespace drop will fail.
my $filepath =
- "$shorter_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename";
+ "$shorter_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename";
unlink($filepath)
- or BAIL_OUT("unable to unlink $filepath");
+ or BAIL_OUT("unable to unlink $filepath");
}
ok( -d "$tempdir/backup1/pg_replslot",
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C' ],
+ [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C' ],
'pg_basebackup -C fails without slot name');
$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0', '--no-slot' ],
+ [ 'pg_basebackup', '-D',
+ "$tempdir/backupxs_slot", '-C',
+ '-S', 'slot0',
+ '--no-slot' ],
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
- [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
+ [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
-is($node->safe_psql('postgres', q{SELECT slot_name FROM pg_replication_slots WHERE slot_name = 'slot0'}),
- 'slot0',
- 'replication slot was created');
-isnt($node->safe_psql('postgres', q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot0'}),
- '',
- 'restart LSN of new slot is not null');
+is( $node->safe_psql(
+ 'postgres',
+q{SELECT slot_name FROM pg_replication_slots WHERE slot_name = 'slot0'}),
+ 'slot0',
+ 'replication slot was created');
+isnt(
+ $node->safe_psql(
+ 'postgres',
+q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot0'}),
+ '',
+ 'restart LSN of new slot is not null');
$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ],
+ [ 'pg_basebackup', '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->safe_psql('postgres',
# create tables to corrupt and get their relfilenodes
my $file_corrupt1 = $node->safe_psql('postgres',
- q{SELECT a INTO corrupt1 FROM generate_series(1,10000) AS a; ALTER TABLE corrupt1 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt1')}
+q{SELECT a INTO corrupt1 FROM generate_series(1,10000) AS a; ALTER TABLE corrupt1 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt1')}
);
my $file_corrupt2 = $node->safe_psql('postgres',
- q{SELECT b INTO corrupt2 FROM generate_series(1,2) AS b; ALTER TABLE corrupt2 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt2')}
+q{SELECT b INTO corrupt2 FROM generate_series(1,2) AS b; ALTER TABLE corrupt2 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt2')}
);
# set page header and block sizes
close $file;
system_or_bail 'pg_ctl', '-D', $pgdata, 'start';
-$node->command_checks_all([ 'pg_basebackup', '-D', "$tempdir/backup_corrupt"],
+$node->command_checks_all(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt" ],
1,
[qr{^$}],
[qr/^WARNING.*checksum verification failed/s],
- 'pg_basebackup reports checksum mismatch'
-);
+ 'pg_basebackup reports checksum mismatch');
rmtree("$tempdir/backup_corrupt");
# induce further corruption in 5 more blocks
system_or_bail 'pg_ctl', '-D', $pgdata, 'stop';
open $file, '+<', "$pgdata/$file_corrupt1";
-for my $i ( 1..5 ) {
- my $offset = $pageheader_size + $i * $block_size;
- seek($file, $offset, 0);
- syswrite($file, '\0\0\0\0\0\0\0\0\0');
+for my $i (1 .. 5)
+{
+ my $offset = $pageheader_size + $i * $block_size;
+ seek($file, $offset, 0);
+ syswrite($file, '\0\0\0\0\0\0\0\0\0');
}
close $file;
system_or_bail 'pg_ctl', '-D', $pgdata, 'start';
-$node->command_checks_all([ 'pg_basebackup', '-D', "$tempdir/backup_corrupt2"],
- 1,
- [qr{^$}],
- [qr/^WARNING.*further.*failures.*will.not.be.reported/s],
- 'pg_basebackup does not report more than 5 checksum mismatches'
-);
+$node->command_checks_all(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt2" ],
+ 1,
+ [qr{^$}],
+ [qr/^WARNING.*further.*failures.*will.not.be.reported/s],
+ 'pg_basebackup does not report more than 5 checksum mismatches');
rmtree("$tempdir/backup_corrupt2");
# induce corruption in a second file
close $file;
system_or_bail 'pg_ctl', '-D', $pgdata, 'start';
-$node->command_checks_all([ 'pg_basebackup', '-D', "$tempdir/backup_corrupt3"],
- 1,
- [qr{^$}],
- [qr/^WARNING.*7 total checksum verification failures/s],
- 'pg_basebackup correctly report the total number of checksum mismatches'
-);
+$node->command_checks_all(
+ [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt3" ],
+ 1,
+ [qr{^$}],
+ [qr/^WARNING.*7 total checksum verification failures/s],
+ 'pg_basebackup correctly report the total number of checksum mismatches');
rmtree("$tempdir/backup_corrupt3");
# do not verify checksums, should return ok
$node->command_ok(
- [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt4", '-k' ],
+ [ 'pg_basebackup', '-D', "$tempdir/backup_corrupt4", '-k' ],
'pg_basebackup with -k does not report checksum mismatch');
rmtree("$tempdir/backup_corrupt4");
is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
'dropping a replication slot');
-is($primary->slot($slot_name)->{'slot_type'}, '', 'replication slot was removed');
+is($primary->slot($slot_name)->{'slot_type'},
+ '', 'replication slot was removed');
# Generate some WAL. Use --synchronous at the same time to add more
# code coverage. Switch to the next segment first so that subsequent
# Permissions on WAL files should be default
SKIP:
{
- skip "unix-style permissions not supported on Windows", 1 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 1
+ if ($windows_os);
ok(check_mode_recursive($stream_dir, 0700, 0600),
- "check stream dir permissions");
+ "check stream dir permissions");
}
# check with a corrupted pg_control
my $pg_control = $node->data_dir . '/global/pg_control';
-my $size = (stat($pg_control))[7];
+my $size = (stat($pg_control))[7];
open my $fh, '>', $pg_control or BAIL_OUT($!);
binmode $fh;
+
# fill file with zeros
print $fh pack("x[$size]");
close $fh;
-command_checks_all([ 'pg_controldata', $node->data_dir ],
- 0,
- [ qr/WARNING: Calculated CRC checksum does not match value stored in file/,
- qr/WARNING: invalid WAL segment size/ ],
- [ qr/^$/ ],
- 'pg_controldata with corrupted pg_control');
+command_checks_all(
+ [ 'pg_controldata', $node->data_dir ],
+ 0,
+ [
+qr/WARNING: Calculated CRC checksum does not match value stored in file/,
+ qr/WARNING: invalid WAL segment size/ ],
+ [qr/^$/],
+ 'pg_controldata with corrupted pg_control');
# Windows but we still want to do the restart test.
my $logFileName = "$tempdir/data/perm-test-600.log";
-command_ok(
- [ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ],
+command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ],
'pg_ctl restart with server not running');
# Permissions on log file should be default
SKIP:
{
- skip "unix-style permissions not supported on Windows", 2 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 2
+ if ($windows_os);
ok(-f $logFileName);
ok(check_mode_recursive("$tempdir/data", 0700, 0600));
# Tests which target the 'dump_test' schema, specifically.
my %dump_test_schema_runs = (
- only_dump_test_schema => 1,
- test_schema_plus_blobs => 1,
-);
+ only_dump_test_schema => 1,
+ test_schema_plus_blobs => 1,);
# Tests which are considered 'full' dumps by pg_dump, but there
# are flags used to exclude specific items (ACLs, blobs, etc).
my %full_runs = (
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_owner => 1,
- no_privs => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- with_oids => 1,
-);
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_owner => 1,
+ no_privs => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ with_oids => 1,);
# This is where the actual tests are defined.
my %tests = (
\QFOR ROLE regress_dump_test_role IN SCHEMA dump_test \E
\QGRANT SELECT ON TABLES TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_post_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE' => {
create_order => 55,
\QFOR ROLE regress_dump_test_role \E
\QREVOKE ALL ON FUNCTIONS FROM PUBLIC;\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_post_data => 1, },
+ unlike => { no_privs => 1, }, },
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT'
=> {
\QFOR ROLE regress_dump_test_role \E
\QGRANT INSERT,REFERENCES,DELETE,TRIGGER,TRUNCATE,UPDATE ON TABLES TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_post_data => 1, },
+ unlike => { no_privs => 1, }, },
'ALTER ROLE regress_dump_test_role' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER ROLE regress_dump_test_role WITH \E
\QNOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN \E
\QNOREPLICATION NOBYPASSRLS;\E
'ALTER COLLATION test0 OWNER TO' => {
regexp => qr/^ALTER COLLATION public.test0 OWNER TO .*;/m,
collation => 1,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- %dump_test_schema_runs,
- no_owner => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { %dump_test_schema_runs, no_owner => 1, }, },
'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => {
- regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .*;/m,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .*;/m,
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER SERVER s1 OWNER TO' => {
- regexp => qr/^ALTER SERVER s1 OWNER TO .*;/m,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ regexp => qr/^ALTER SERVER s1 OWNER TO .*;/m,
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER FUNCTION dump_test.pltestlang_call_handler() OWNER TO' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER FUNCTION dump_test.pltestlang_call_handler() \E
\QOWNER TO \E
.*;/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER OPERATOR FAMILY dump_test.op_family OWNER TO' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER OPERATOR FAMILY dump_test.op_family USING btree \E
\QOWNER TO \E
.*;/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER OPERATOR FAMILY dump_test.op_family USING btree' => {
create_order => 75,
\QFUNCTION 1 (integer, integer) btint4cmp(integer,integer) ,\E\n\s+
\QFUNCTION 2 (integer, integer) btint4sortsupport(internal);\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'ALTER OPERATOR CLASS dump_test.op_class OWNER TO' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER OPERATOR CLASS dump_test.op_class USING btree \E
\QOWNER TO \E
.*;/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER PUBLICATION pub1 OWNER TO' => {
- regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .*;/m,
- like => {
- %full_runs,
- section_post_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .*;/m,
+ like => { %full_runs, section_post_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER LARGE OBJECT ... OWNER TO' => {
- regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m,
- like => {
+ regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m,
+ like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1, },
+ column_inserts => 1,
+ data_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1, },
unlike => {
- no_blobs => 1,
- no_owner => 1,
+ no_blobs => 1,
+ no_owner => 1,
schema_only => 1, }, },
'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => {
- regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .*;/m,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .*;/m,
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER SCHEMA dump_test OWNER TO' => {
- regexp => qr/^ALTER SCHEMA dump_test OWNER TO .*;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ regexp => qr/^ALTER SCHEMA dump_test OWNER TO .*;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER SCHEMA dump_test_second_schema OWNER TO' => {
- regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .*;/m,
- like => {
+ regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .*;/m,
+ like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER SEQUENCE test_table_col1_seq' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER SEQUENCE dump_test.test_table_col1_seq OWNED BY dump_test.test_table.col1;\E
/xm,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER SEQUENCE test_third_table_col1_seq' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER SEQUENCE dump_test_second_schema.test_third_table_col1_seq OWNED BY dump_test_second_schema.test_third_table.col1;\E
/xm,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, }, },
+ role => 1,
+ section_pre_data => 1, }, },
'ALTER TABLE ONLY test_table ADD CONSTRAINT ... PRIMARY KEY' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER TABLE ONLY dump_test.test_table\E \n^\s+
\QADD CONSTRAINT test_table_pkey PRIMARY KEY (col1);\E
/xm,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col1 SET STATISTICS 90' => {
create_order => 93,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col2 SET STORAGE' => {
create_order => 94,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col3 SET STORAGE' => {
create_order => 95,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col4 SET n_distinct' => {
create_order => 95,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2'
=> {
- regexp => qr/^
+ regexp => qr/^
\QALTER TABLE ONLY dump_test.measurement ATTACH PARTITION dump_test_second_schema.measurement_y2006m2 \E
\QFOR VALUES FROM ('2006-02-01') TO ('2006-03-01');\E\n
/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'ALTER TABLE test_table CLUSTER ON test_table_pkey' => {
create_order => 96,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE test_table DISABLE TRIGGER ALL' => {
- regexp => qr/^
+ regexp => qr/^
\QSET SESSION AUTHORIZATION 'test_superuser';\E\n\n
\QALTER TABLE dump_test.test_table DISABLE TRIGGER ALL;\E\n\n
\QCOPY dump_test.test_table (col1, col2, col3, col4) FROM stdin;\E
\n(?:\d\t\\N\t\\N\t\\N\n){9}\\\.\n\n\n
\QALTER TABLE dump_test.test_table ENABLE TRIGGER ALL;\E/xm,
- like => { data_only => 1, }, },
+ like => { data_only => 1, }, },
'ALTER FOREIGN TABLE foreign_table ALTER COLUMN c1 OPTIONS' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER FOREIGN TABLE dump_test.foreign_table ALTER COLUMN c1 OPTIONS (\E\n
\s+\Qcolumn_name 'col1'\E\n
\Q);\E\n
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'ALTER TABLE test_table OWNER TO' => {
- regexp => qr/^ALTER TABLE dump_test.test_table OWNER TO .*;/m,
- like => {
+ regexp => qr/^ALTER TABLE dump_test.test_table OWNER TO .*;/m,
+ like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- no_owner => 1, }, },
+ exclude_test_table => 1,
+ no_owner => 1, }, },
'ALTER TABLE test_table ENABLE ROW LEVEL SECURITY' => {
create_order => 23,
create_sql => 'ALTER TABLE dump_test.test_table
ENABLE ROW LEVEL SECURITY;',
- regexp => qr/^ALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;/m,
- like => {
+ regexp =>
+ qr/^ALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;/m,
+ like => {
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1, },
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER TABLE test_second_table OWNER TO' => {
- regexp => qr/^ALTER TABLE dump_test.test_second_table OWNER TO .*;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ regexp => qr/^ALTER TABLE dump_test.test_second_table OWNER TO .*;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER TABLE test_third_table OWNER TO' => {
- regexp => qr/^ALTER TABLE dump_test_second_schema.test_third_table OWNER TO .*;/m,
- like => {
+ regexp =>
+qr/^ALTER TABLE dump_test_second_schema.test_third_table OWNER TO .*;/m,
+ like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER TABLE measurement OWNER TO' => {
- regexp => qr/^ALTER TABLE dump_test.measurement OWNER TO .*;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ regexp => qr/^ALTER TABLE dump_test.measurement OWNER TO .*;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER TABLE measurement_y2006m2 OWNER TO' => {
- regexp => qr/^ALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO .*;/m,
- like => {
+ regexp =>
+qr/^ALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO .*;/m,
+ like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_owner => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_owner => 1, }, },
'ALTER FOREIGN TABLE foreign_table OWNER TO' => {
- regexp => qr/^ALTER FOREIGN TABLE dump_test.foreign_table OWNER TO .*;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ regexp =>
+ qr/^ALTER FOREIGN TABLE dump_test.foreign_table OWNER TO .*;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO' => {
regexp =>
- qr/^ALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 OWNER TO .*;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+qr/^ALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 OWNER TO .*;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1, }, },
+ no_owner => 1, }, },
'ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO' => {
regexp =>
- qr/^ALTER TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 OWNER TO .*;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+qr/^ALTER TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 OWNER TO .*;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
only_dump_test_table => 1,
regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1, },
+ column_inserts => 1,
+ data_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1, },
unlike => {
schema_only => 1,
- no_blobs => 1, }, },
+ no_blobs => 1, }, },
'BLOB load (using lo_from_bytea)' => {
- regexp => qr/^
+ regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lowrite(0, \E
\Q'\x310a320a330a340a350a360a370a380a390a');\E\n
/xm,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- section_data => 1,
- test_schema_plus_blobs => 1, },
+ column_inserts => 1,
+ data_only => 1,
+ section_data => 1,
+ test_schema_plus_blobs => 1, },
unlike => {
binary_upgrade => 1,
- no_blobs => 1,
- schema_only => 1, }, },
+ no_blobs => 1,
+ schema_only => 1, }, },
'COMMENT ON DATABASE postgres' => {
- regexp => qr/^COMMENT ON DATABASE postgres IS .*;/m,
+ regexp => qr/^COMMENT ON DATABASE postgres IS .*;/m,
+
# Should appear in the same tests as "CREATE DATABASE postgres"
- like => { createdb => 1, }, },
+ like => { createdb => 1, }, },
'COMMENT ON EXTENSION plpgsql' => {
- regexp => qr/^COMMENT ON EXTENSION plpgsql IS .*;/m,
+ regexp => qr/^COMMENT ON EXTENSION plpgsql IS .*;/m,
+
# this shouldn't ever get emitted anymore
- like => {}, },
+ like => {}, },
'COMMENT ON TABLE dump_test.test_table' => {
create_order => 36,
create_sql => 'COMMENT ON TABLE dump_test.test_table
IS \'comment on table\';',
- regexp => qr/^COMMENT ON TABLE dump_test.test_table IS 'comment on table';/m,
- like => {
+ regexp =>
+ qr/^COMMENT ON TABLE dump_test.test_table IS 'comment on table';/m,
+ like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'COMMENT ON COLUMN dump_test.test_table.col1' => {
create_order => 36,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'COMMENT ON COLUMN dump_test.composite.f1' => {
create_order => 44,
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.composite.f1 IS 'comment on column of type';\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON COLUMN dump_test.test_second_table.col1' => {
create_order => 63,
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.test_second_table.col1 IS 'comment on column col1';\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON COLUMN dump_test.test_second_table.col2' => {
create_order => 64,
regexp => qr/^
\QCOMMENT ON COLUMN dump_test.test_second_table.col2 IS 'comment on column col2';\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON CONVERSION dump_test.test_conversion' => {
create_order => 79,
IS \'comment on test conversion\';',
regexp =>
qr/^COMMENT ON CONVERSION dump_test.test_conversion IS 'comment on test conversion';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON COLLATION test0' => {
create_order => 77,
create_sql => 'COMMENT ON COLLATION test0
IS \'comment on test0 collation\';',
regexp =>
- qr/^COMMENT ON COLLATION public.test0 IS 'comment on test0 collation';/m,
+qr/^COMMENT ON COLLATION public.test0 IS 'comment on test0 collation';/m,
collation => 1,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'COMMENT ON LARGE OBJECT ...' => {
create_order => 65,
/xm,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1, },
+ column_inserts => 1,
+ data_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1, },
unlike => {
- no_blobs => 1,
+ no_blobs => 1,
schema_only => 1, }, },
'COMMENT ON PUBLICATION pub1' => {
IS \'comment on publication\';',
regexp =>
qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m,
- like => {
- %full_runs,
- section_post_data => 1, }, },
+ like => { %full_runs, section_post_data => 1, }, },
'COMMENT ON SUBSCRIPTION sub1' => {
create_order => 55,
IS \'comment on subscription\';',
regexp =>
qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m,
- like => {
- %full_runs,
- section_post_data => 1, }, },
+ like => { %full_runs, section_post_data => 1, }, },
'COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => {
create_order => 84,
IS \'comment on text search configuration\';',
regexp =>
qr/^COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 IS 'comment on text search configuration';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => {
create_order => 84,
IS \'comment on text search dictionary\';',
regexp =>
qr/^COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 IS 'comment on text search dictionary';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1' => {
create_order => 84,
IS \'comment on text search parser\';',
regexp =>
qr/^COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1 IS 'comment on text search parser';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => {
create_order => 84,
IS \'comment on text search template\';',
regexp =>
qr/^COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 IS 'comment on text search template';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TYPE dump_test.planets - ENUM' => {
create_order => 68,
create_sql => 'COMMENT ON TYPE dump_test.planets
IS \'comment on enum type\';',
- regexp => qr/^COMMENT ON TYPE dump_test.planets IS 'comment on enum type';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ regexp =>
+ qr/^COMMENT ON TYPE dump_test.planets IS 'comment on enum type';/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TYPE dump_test.textrange - RANGE' => {
create_order => 69,
create_sql => 'COMMENT ON TYPE dump_test.textrange
IS \'comment on range type\';',
- regexp => qr/^COMMENT ON TYPE dump_test.textrange IS 'comment on range type';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ regexp =>
+qr/^COMMENT ON TYPE dump_test.textrange IS 'comment on range type';/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TYPE dump_test.int42 - Regular' => {
create_order => 70,
create_sql => 'COMMENT ON TYPE dump_test.int42
IS \'comment on regular type\';',
- regexp => qr/^COMMENT ON TYPE dump_test.int42 IS 'comment on regular type';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ regexp =>
+ qr/^COMMENT ON TYPE dump_test.int42 IS 'comment on regular type';/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COMMENT ON TYPE dump_test.undefined - Undefined' => {
create_order => 71,
create_sql => 'COMMENT ON TYPE dump_test.undefined
IS \'comment on undefined type\';',
regexp =>
- qr/^COMMENT ON TYPE dump_test.undefined IS 'comment on undefined type';/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+qr/^COMMENT ON TYPE dump_test.undefined IS 'comment on undefined type';/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'COPY test_table' => {
create_order => 4,
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
- only_dump_test_table => 1,
- section_data => 1, },
+ data_only => 1,
+ only_dump_test_table => 1,
+ section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- schema_only => 1, }, },
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ schema_only => 1, }, },
'COPY fk_reference_test_table' => {
create_order => 22,
exclude_test_table_data => 1,
section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
# In a data-only dump, we try to actually order according to FKs,
# so this check is just making sure that the referring table comes after
# the referred-to table.
'COPY fk_reference_test_table second' => {
- regexp => qr/^
+ regexp => qr/^
\QCOPY dump_test.test_table (col1, col2, col3, col4) FROM stdin;\E
\n(?:\d\t\\N\t\\N\t\\N\n){9}\\\.\n.*
\QCOPY dump_test.fk_reference_test_table (col1) FROM stdin;\E
\n(?:\d\n){5}\\\.\n
/xms,
- like => { data_only => 1, }, },
+ like => { data_only => 1, }, },
'COPY test_second_table' => {
create_order => 7,
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
- section_data => 1, },
+ data_only => 1,
+ section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
'COPY test_third_table' => {
create_order => 12,
/xm,
like => {
%full_runs,
- data_only => 1,
- role => 1,
- section_data => 1, },
+ data_only => 1,
+ role => 1,
+ section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_test_table_data => 1,
- schema_only => 1,
- with_oids => 1, }, },
+ schema_only => 1,
+ with_oids => 1, }, },
'COPY test_third_table WITH OIDS' => {
- regexp => qr/^
+ regexp => qr/^
\QCOPY dump_test_second_schema.test_third_table (col1) WITH OIDS FROM stdin;\E
\n(?:\d+\t\d\n){9}\\\.\n
/xm,
- like => { with_oids => 1, }, },
+ like => { with_oids => 1, }, },
'COPY test_fourth_table' => {
create_order => 7,
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
- section_data => 1, },
+ data_only => 1,
+ section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
'COPY test_fifth_table' => {
create_order => 54,
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
- section_data => 1, },
+ data_only => 1,
+ section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
'COPY test_table_identity' => {
create_order => 54,
like => {
%full_runs,
%dump_test_schema_runs,
- data_only => 1,
- section_data => 1, },
+ data_only => 1,
+ section_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
'INSERT INTO test_table' => {
- regexp => qr/^
+ regexp => qr/^
(?:INSERT\ INTO\ dump_test.test_table\ \(col1,\ col2,\ col3,\ col4\)\ VALUES\ \(\d,\ NULL,\ NULL,\ NULL\);\n){9}
/xm,
- like => { column_inserts => 1, }, },
+ like => { column_inserts => 1, }, },
'INSERT INTO test_second_table' => {
- regexp => qr/^
+ regexp => qr/^
(?:INSERT\ INTO\ dump_test.test_second_table\ \(col1,\ col2\)
\ VALUES\ \(\d,\ '\d'\);\n){9}/xm,
- like => { column_inserts => 1, }, },
+ like => { column_inserts => 1, }, },
'INSERT INTO test_third_table' => {
- regexp => qr/^
+ regexp => qr/^
(?:INSERT\ INTO\ dump_test_second_schema.test_third_table\ \(col1\)
\ VALUES\ \(\d\);\n){9}/xm,
- like => { column_inserts => 1, }, },
+ like => { column_inserts => 1, }, },
'INSERT INTO test_fourth_table' => {
- regexp => qr/^\QINSERT INTO dump_test.test_fourth_table DEFAULT VALUES;\E/m,
- like => { column_inserts => 1, }, },
+ regexp =>
+ qr/^\QINSERT INTO dump_test.test_fourth_table DEFAULT VALUES;\E/m,
+ like => { column_inserts => 1, }, },
'INSERT INTO test_fifth_table' => {
regexp =>
qr/^\QINSERT INTO dump_test.test_fifth_table (col1, col2, col3, col4, col5) VALUES (NULL, true, false, B'11001', 'NaN');\E/m,
- like => { column_inserts => 1, }, },
+ like => { column_inserts => 1, }, },
'INSERT INTO test_table_identity' => {
regexp =>
qr/^\QINSERT INTO dump_test.test_table_identity (col1, col2) OVERRIDING SYSTEM VALUE VALUES (1, 'test');\E/m,
- like => { column_inserts => 1, }, },
+ like => { column_inserts => 1, }, },
'CREATE ROLE regress_dump_test_role' => {
create_order => 1,
'CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler;',
regexp =>
qr/CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler;/m,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE COLLATION test0 FROM "C"' => {
create_order => 76,
regexp => qr/^
\QCREATE COLLATION public.test0 (provider = libc, locale = 'C');\E/xm,
collation => 1,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE CAST FOR timestamptz' => {
create_order => 51,
'CREATE CAST (timestamptz AS interval) WITH FUNCTION age(timestamptz) AS ASSIGNMENT;',
regexp =>
qr/CREATE CAST \(timestamp with time zone AS interval\) WITH FUNCTION pg_catalog\.age\(timestamp with time zone\) AS ASSIGNMENT;/m,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE DATABASE postgres' => {
- regexp => qr/^
+ regexp => qr/^
\QCREATE DATABASE postgres WITH TEMPLATE = template0 \E
.*;/xm,
- like => { createdb => 1, }, },
+ like => { createdb => 1, }, },
'CREATE DATABASE dump_test' => {
create_order => 47,
regexp => qr/^
\QCREATE DATABASE dump_test WITH TEMPLATE = template0 \E
.*;/xm,
- like => { pg_dumpall_dbprivs => 1, }, },
+ like => { pg_dumpall_dbprivs => 1, }, },
'CREATE EXTENSION ... plpgsql' => {
- regexp => qr/^
+ regexp => qr/^
\QCREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;\E
/xm,
+
# this shouldn't ever get emitted anymore
like => {}, },
like => {
%full_runs,
%dump_test_schema_runs,
- exclude_test_table => 1,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ exclude_test_table => 1,
+ section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE CONVERSION dump_test.test_conversion' => {
create_order => 78,
'CREATE DEFAULT CONVERSION dump_test.test_conversion FOR \'LATIN1\' TO \'UTF8\' FROM iso8859_1_to_utf8;',
regexp =>
qr/^\QCREATE DEFAULT CONVERSION dump_test.test_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;\E/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE DOMAIN dump_test.us_postal_code' => {
create_order => 29,
\$\Q'::text) OR (VALUE ~ '^\d{5}-\d{4}\E\$
\Q'::text)));\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE FUNCTION dump_test.pltestlang_call_handler' => {
create_order => 17,
\n\s+AS\ \'\$
\Qlibdir\/plpgsql', 'plpgsql_call_handler';\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE FUNCTION dump_test.trigger_func' => {
create_order => 30,
\n\s+AS\ \$\$
\Q BEGIN RETURN NULL; END;\E
\$\$;/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE FUNCTION dump_test.event_trigger_func' => {
create_order => 32,
\n\s+AS\ \$\$
\Q BEGIN RETURN; END;\E
\$\$;/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE OPERATOR FAMILY dump_test.op_family' => {
create_order => 73,
regexp => qr/^
\QCREATE OPERATOR FAMILY dump_test.op_family USING btree;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE OPERATOR CLASS dump_test.op_class' => {
create_order => 74,
\QFUNCTION 1 (bigint, bigint) btint8cmp(bigint,bigint) ,\E\n\s+
\QFUNCTION 2 (bigint, bigint) btint8sortsupport(internal);\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE EVENT TRIGGER test_event_trigger' => {
create_order => 33,
\QON ddl_command_start\E
\n\s+\QEXECUTE PROCEDURE dump_test.event_trigger_func();\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, }, },
+ like => { %full_runs, section_post_data => 1, }, },
'CREATE TRIGGER test_trigger' => {
create_order => 31,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
- exclude_test_table => 1,
+ exclude_test_table => 1,
exclude_dump_test_schema => 1, }, },
'CREATE TYPE dump_test.planets AS ENUM' => {
\n\s+'earth',
\n\s+'mars'
\n\);/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1, }, },
'CREATE TYPE dump_test.planets AS ENUM pg_upgrade' => {
- regexp => qr/^
+ regexp => qr/^
\QCREATE TYPE dump_test.planets AS ENUM (\E
\n\);.*^
\QALTER TYPE dump_test.planets ADD VALUE 'venus';\E
\n.*^
\QALTER TYPE dump_test.planets ADD VALUE 'mars';\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE TYPE dump_test.textrange AS RANGE' => {
create_order => 38,
\n\s+\Qsubtype = text,\E
\n\s+\Qcollation = pg_catalog."C"\E
\n\);/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TYPE dump_test.int42' => {
create_order => 39,
create_sql => 'CREATE TYPE dump_test.int42;',
regexp => qr/^CREATE TYPE dump_test.int42;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => {
create_order => 80,
regexp => qr/^
\QCREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 (\E\n
\s+\QPARSER = pg_catalog."default" );\E/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'ALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 ...' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1\E\n
\s+\QADD MAPPING FOR asciiword WITH english_stem;\E\n
\n
\s+\QADD MAPPING FOR uint WITH simple;\E\n
\n
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => {
create_order => 81,
regexp => qr/^
\QCREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 (\E\n
\s+\QLEXIZE = dsimple_lexize );\E/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TEXT SEARCH PARSER dump_test.alt_ts_prs1' => {
create_order => 82,
\s+\QEND = prsd_end,\E\n
\s+\QLEXTYPES = prsd_lextype );\E\n
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => {
create_order => 83,
\QCREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 (\E\n
\s+\QTEMPLATE = pg_catalog.simple );\E\n
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE FUNCTION dump_test.int42_in' => {
create_order => 40,
\n\s+\QLANGUAGE internal IMMUTABLE STRICT\E
\n\s+AS\ \$\$int4in\$\$;
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE FUNCTION dump_test.int42_out' => {
create_order => 41,
\n\s+\QLANGUAGE internal IMMUTABLE STRICT\E
\n\s+AS\ \$\$int4out\$\$;
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE PROCEDURE dump_test.ptest1' => {
create_order => 41,
\n\s+\QLANGUAGE sql\E
\n\s+AS\ \$\$\Q INSERT INTO dump_test.test_table (col1) VALUES (a) \E\$\$;
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TYPE dump_test.int42 populated' => {
create_order => 42,
\n\s+\QSTORAGE = plain,\E
\n\s+PASSEDBYVALUE\n\);
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TYPE dump_test.composite' => {
create_order => 43,
\n\s+\Qf2 dump_test.int42\E
\n\);
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TYPE dump_test.undefined' => {
create_order => 39,
create_sql => 'CREATE TYPE dump_test.undefined;',
regexp => qr/^CREATE TYPE dump_test.undefined;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE FOREIGN DATA WRAPPER dummy' => {
create_order => 35,
create_sql => 'CREATE FOREIGN DATA WRAPPER dummy;',
regexp => qr/CREATE FOREIGN DATA WRAPPER dummy;/m,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy' => {
create_order => 36,
create_sql => 'CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;',
regexp => qr/CREATE SERVER s1 FOREIGN DATA WRAPPER dummy;/m,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => {
create_order => 88,
\s+\Qschema_name 'x1'\E\n
\Q);\E\n
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1' => {
create_order => 86,
'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;',
regexp =>
qr/CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;/m,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE TRANSFORM FOR int' => {
create_order => 34,
'CREATE TRANSFORM FOR int LANGUAGE SQL (FROM SQL WITH FUNCTION varchar_transform(internal), TO SQL WITH FUNCTION int4recv(internal));',
regexp =>
qr/CREATE TRANSFORM FOR integer LANGUAGE sql \(FROM SQL WITH FUNCTION pg_catalog\.varchar_transform\(internal\), TO SQL WITH FUNCTION pg_catalog\.int4recv\(internal\)\);/m,
- like => {
- %full_runs,
- section_pre_data => 1, }, },
+ like => { %full_runs, section_pre_data => 1, }, },
'CREATE LANGUAGE pltestlang' => {
create_order => 18,
regexp => qr/^
\QCREATE PROCEDURAL LANGUAGE pltestlang \E
\QHANDLER dump_test.pltestlang_call_handler;\E
- /xm,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ /xm,
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE MATERIALIZED VIEW matview' => {
create_order => 20,
\n\s+\QFROM dump_test.test_table\E
\n\s+\QWITH NO DATA;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE MATERIALIZED VIEW matview_second' => {
create_order => 21,
\n\s+\QFROM dump_test.matview\E
\n\s+\QWITH NO DATA;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE MATERIALIZED VIEW matview_third' => {
create_order => 58,
\n\s+\QFROM dump_test.matview_second\E
\n\s+\QWITH NO DATA;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE MATERIALIZED VIEW matview_fourth' => {
create_order => 59,
\n\s+\QFROM dump_test.matview_third\E
\n\s+\QWITH NO DATA;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE POLICY p1 ON test_table' => {
create_order => 22,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE POLICY p2 ON test_table FOR SELECT' => {
create_order => 24,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE POLICY p3 ON test_table FOR INSERT' => {
create_order => 25,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE POLICY p4 ON test_table FOR UPDATE' => {
create_order => 26,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE POLICY p5 ON test_table FOR DELETE' => {
create_order => 27,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE POLICY p6 ON test_table AS RESTRICTIVE' => {
create_order => 27,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_post_data => 1, },
+ only_dump_test_table => 1,
+ section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE PUBLICATION pub1' => {
create_order => 50,
regexp => qr/^
\QCREATE PUBLICATION pub1 WITH (publish = 'insert, update, delete, truncate');\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, }, },
+ like => { %full_runs, section_post_data => 1, }, },
'CREATE PUBLICATION pub2' => {
create_order => 50,
regexp => qr/^
\QCREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish = '');\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, }, },
+ like => { %full_runs, section_post_data => 1, }, },
'CREATE SUBSCRIPTION sub1' => {
create_order => 50,
regexp => qr/^
\QCREATE SUBSCRIPTION sub1 CONNECTION 'dbname=doesnotexist' PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1');\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, }, },
+ like => { %full_runs, section_post_data => 1, }, },
'ALTER PUBLICATION pub1 ADD TABLE test_table' => {
create_order => 51,
regexp => qr/^
\QALTER PUBLICATION pub1 ADD TABLE ONLY dump_test.test_table;\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'ALTER PUBLICATION pub1 ADD TABLE test_second_table' => {
create_order => 52,
regexp => qr/^
\QALTER PUBLICATION pub1 ADD TABLE ONLY dump_test.test_second_table;\E
/xm,
- like => {
- %full_runs,
- section_post_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like => { %full_runs, section_post_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE SCHEMA public' => {
- regexp => qr/^CREATE SCHEMA public;/m,
+ regexp => qr/^CREATE SCHEMA public;/m,
+
# this shouldn't ever get emitted anymore
- like => {}, },
+ like => {}, },
'CREATE SCHEMA dump_test' => {
create_order => 2,
create_sql => 'CREATE SCHEMA dump_test;',
regexp => qr/^CREATE SCHEMA dump_test;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE SCHEMA dump_test_second_schema' => {
create_order => 9,
regexp => qr/^CREATE SCHEMA dump_test_second_schema;/m,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, }, },
+ role => 1,
+ section_pre_data => 1, }, },
'CREATE TABLE test_table' => {
create_order => 3,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1, }, },
+ exclude_test_table => 1, }, },
'CREATE TABLE fk_reference_test_table' => {
create_order => 21,
\n\s+\Qcol1 integer NOT NULL\E
\n\);
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TABLE test_second_table' => {
create_order => 6,
\n\s+\Qcol2 text\E
\n\);
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE UNLOGGED TABLE test_third_table WITH OIDS' => {
create_order => 11,
/xm,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
+ role => 1,
+ section_pre_data => 1, },
unlike => {
+
# FIXME figure out why/how binary upgrade drops OIDs.
binary_upgrade => 1, }, },
\)\n
\QPARTITION BY RANGE (logdate);\E\n
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1, }, },
'CREATE TABLE measurement_y2006m2 PARTITION OF' => {
/xm,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- binary_upgrade => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { binary_upgrade => 1, }, },
'CREATE TABLE test_fourth_table_zero_col' => {
create_order => 6,
\QCREATE TABLE dump_test.test_fourth_table (\E
\n\);
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TABLE test_fifth_table' => {
create_order => 53,
\n\s+\Qcol5 double precision\E
\n\);
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE TABLE test_table_identity' => {
create_order => 3,
\s+\QCACHE 1\E\n
\);
/xms,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE STATISTICS extended_stats_no_options' => {
create_order => 97,
regexp => qr/^
\QCREATE STATISTICS dump_test.test_ext_stats_no_options ON col1, col2 FROM dump_test.test_fifth_table;\E
/xms,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_post_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE STATISTICS extended_stats_options' => {
create_order => 97,
regexp => qr/^
\QCREATE STATISTICS dump_test.test_ext_stats_opts (ndistinct) ON col1, col2 FROM dump_test.test_fifth_table;\E
/xms,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_post_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE SEQUENCE test_table_col1_seq' => {
- regexp => qr/^
+ regexp => qr/^
\QCREATE SEQUENCE dump_test.test_table_col1_seq\E
\n\s+\QAS integer\E
\n\s+\QSTART WITH 1\E
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE SEQUENCE test_third_table_col1_seq' => {
- regexp => qr/^
+ regexp => qr/^
\QCREATE SEQUENCE dump_test_second_schema.test_third_table_col1_seq\E
\n\s+\QAS integer\E
\n\s+\QSTART WITH 1\E
/xm,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, }, },
+ role => 1,
+ section_pre_data => 1, }, },
'CREATE UNIQUE INDEX test_third_table_idx ON test_third_table' => {
create_order => 13,
/xm,
like => {
%full_runs,
- role => 1,
- section_post_data => 1, }, },
+ role => 1,
+ section_post_data => 1, }, },
'CREATE INDEX ON ONLY measurement' => {
create_order => 92,
- create_sql => 'CREATE INDEX ON dump_test.measurement (city_id, logdate);',
+ create_sql =>
+ 'CREATE INDEX ON dump_test.measurement (city_id, logdate);',
regexp => qr/^
\QCREATE INDEX measurement_city_id_logdate_idx ON ONLY dump_test.measurement USING\E
/xm,
like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_privs => 1,
- no_owner => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_post_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ no_owner => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_post_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
exclude_dump_test_schema => 1,
only_dump_test_table => 1,
all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 93,
- create_sql => 'ALTER TABLE dump_test.measurement ADD PRIMARY KEY (city_id, logdate);',
+ create_sql =>
+'ALTER TABLE dump_test.measurement ADD PRIMARY KEY (city_id, logdate);',
regexp => qr/^
\QALTER TABLE ONLY dump_test.measurement\E \n^\s+
\QADD CONSTRAINT measurement_pkey PRIMARY KEY (city_id, logdate);\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_post_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'CREATE INDEX ... ON measurement_y2006_m2' => {
- regexp => qr/^
+ regexp => qr/^
\QCREATE INDEX measurement_y2006m2_city_id_logdate_idx ON dump_test_second_schema.measurement_y2006m2 \E
/xm,
like => {
%full_runs,
- role => 1,
- section_post_data => 1, }, },
+ role => 1,
+ section_post_data => 1, }, },
'ALTER INDEX ... ATTACH PARTITION' => {
- regexp => qr/^
+ regexp => qr/^
\QALTER INDEX dump_test.measurement_city_id_logdate_idx ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_city_id_logdate_idx\E
/xm,
like => {
%full_runs,
- role => 1,
- section_post_data => 1, }, },
+ role => 1,
+ section_post_data => 1, }, },
'ALTER INDEX ... ATTACH PARTITION (primary key)' => {
- all_runs => 1,
- catch_all => 'CREATE ... commands',
- regexp => qr/^
+ all_runs => 1,
+ catch_all => 'CREATE ... commands',
+ regexp => qr/^
\QALTER INDEX dump_test.measurement_pkey ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_pkey\E
/xm,
like => {
\n\s+\QSELECT test_table.col1\E
\n\s+\QFROM dump_test.test_table\E
\n\s+\QWITH LOCAL CHECK OPTION;\E/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
'ALTER VIEW test_view SET DEFAULT' => {
create_order => 62,
'ALTER VIEW dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;',
regexp => qr/^
\QALTER TABLE ONLY dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;\E/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
- unlike => {
- exclude_dump_test_schema => 1, }, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => { exclude_dump_test_schema => 1, }, },
# FIXME
'DROP SCHEMA public (for testing without public schema)' => {
like => {}, },
'DROP SCHEMA public' => {
- regexp => qr/^DROP SCHEMA public;/m,
+ regexp => qr/^DROP SCHEMA public;/m,
+
# this shouldn't ever get emitted anymore
- like => {}, },
+ like => {}, },
'DROP SCHEMA IF EXISTS public' => {
- regexp => qr/^DROP SCHEMA IF EXISTS public;/m,
+ regexp => qr/^DROP SCHEMA IF EXISTS public;/m,
+
# this shouldn't ever get emitted anymore
- like => {}, },
+ like => {}, },
'DROP EXTENSION plpgsql' => {
- regexp => qr/^DROP EXTENSION plpgsql;/m,
+ regexp => qr/^DROP EXTENSION plpgsql;/m,
+
# this shouldn't ever get emitted anymore
- like => {}, },
+ like => {}, },
'DROP FUNCTION dump_test.pltestlang_call_handler()' => {
regexp => qr/^DROP FUNCTION dump_test\.pltestlang_call_handler\(\);/m,
like => { clean => 1, }, },
'DROP LANGUAGE pltestlang' => {
- regexp => qr/^DROP PROCEDURAL LANGUAGE pltestlang;/m,
- like => { clean => 1, }, },
+ regexp => qr/^DROP PROCEDURAL LANGUAGE pltestlang;/m,
+ like => { clean => 1, }, },
'DROP SCHEMA dump_test' => {
- regexp => qr/^DROP SCHEMA dump_test;/m,
- like => { clean => 1, }, },
+ regexp => qr/^DROP SCHEMA dump_test;/m,
+ like => { clean => 1, }, },
'DROP SCHEMA dump_test_second_schema' => {
- regexp => qr/^DROP SCHEMA dump_test_second_schema;/m,
- like => { clean => 1, }, },
+ regexp => qr/^DROP SCHEMA dump_test_second_schema;/m,
+ like => { clean => 1, }, },
'DROP TABLE test_table' => {
- regexp => qr/^DROP TABLE dump_test\.test_table;/m,
- like => { clean => 1, }, },
+ regexp => qr/^DROP TABLE dump_test\.test_table;/m,
+ like => { clean => 1, }, },
'DROP TABLE fk_reference_test_table' => {
- regexp => qr/^DROP TABLE dump_test\.fk_reference_test_table;/m,
- like => { clean => 1, }, },
+ regexp => qr/^DROP TABLE dump_test\.fk_reference_test_table;/m,
+ like => { clean => 1, }, },
'DROP TABLE test_second_table' => {
- regexp => qr/^DROP TABLE dump_test\.test_second_table;/m,
- like => { clean => 1, }, },
+ regexp => qr/^DROP TABLE dump_test\.test_second_table;/m,
+ like => { clean => 1, }, },
'DROP TABLE test_third_table' => {
regexp => qr/^DROP TABLE dump_test_second_schema\.test_third_table;/m,
like => { clean => 1, }, },
'DROP EXTENSION IF EXISTS plpgsql' => {
- regexp => qr/^DROP EXTENSION IF EXISTS plpgsql;/m,
+ regexp => qr/^DROP EXTENSION IF EXISTS plpgsql;/m,
+
# this shouldn't ever get emitted anymore
- like => {}, },
+ like => {}, },
'DROP FUNCTION IF EXISTS dump_test.pltestlang_call_handler()' => {
- regexp => qr/^
+ regexp => qr/^
\QDROP FUNCTION IF EXISTS dump_test.pltestlang_call_handler();\E
/xm,
- like => { clean_if_exists => 1, }, },
+ like => { clean_if_exists => 1, }, },
'DROP LANGUAGE IF EXISTS pltestlang' => {
- regexp => qr/^DROP PROCEDURAL LANGUAGE IF EXISTS pltestlang;/m,
- like => { clean_if_exists => 1, }, },
+ regexp => qr/^DROP PROCEDURAL LANGUAGE IF EXISTS pltestlang;/m,
+ like => { clean_if_exists => 1, }, },
'DROP SCHEMA IF EXISTS dump_test' => {
- regexp => qr/^DROP SCHEMA IF EXISTS dump_test;/m,
- like => { clean_if_exists => 1, }, },
+ regexp => qr/^DROP SCHEMA IF EXISTS dump_test;/m,
+ like => { clean_if_exists => 1, }, },
'DROP SCHEMA IF EXISTS dump_test_second_schema' => {
- regexp => qr/^DROP SCHEMA IF EXISTS dump_test_second_schema;/m,
- like => { clean_if_exists => 1, }, },
+ regexp => qr/^DROP SCHEMA IF EXISTS dump_test_second_schema;/m,
+ like => { clean_if_exists => 1, }, },
'DROP TABLE IF EXISTS test_table' => {
- regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_table;/m,
- like => { clean_if_exists => 1, }, },
+ regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_table;/m,
+ like => { clean_if_exists => 1, }, },
'DROP TABLE IF EXISTS test_second_table' => {
- regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_second_table;/m,
- like => { clean_if_exists => 1, }, },
+ regexp => qr/^DROP TABLE IF EXISTS dump_test\.test_second_table;/m,
+ like => { clean_if_exists => 1, }, },
'DROP TABLE IF EXISTS test_third_table' => {
- regexp => qr/^
+ regexp => qr/^
\QDROP TABLE IF EXISTS dump_test_second_schema.test_third_table;\E
/xm,
- like => { clean_if_exists => 1, }, },
+ like => { clean_if_exists => 1, }, },
'DROP ROLE regress_dump_test_role' => {
- regexp => qr/^
+ regexp => qr/^
\QDROP ROLE regress_dump_test_role;\E
/xm,
- like => { pg_dumpall_globals_clean => 1, }, },
+ like => { pg_dumpall_globals_clean => 1, }, },
'DROP ROLE pg_' => {
- regexp => qr/^
+ regexp => qr/^
\QDROP ROLE pg_\E.*;
/xm,
+
# this shouldn't ever get emitted anywhere
- like => {}, },
+ like => {}, },
'GRANT USAGE ON SCHEMA dump_test_second_schema' => {
create_order => 10,
/xm,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT USAGE ON FOREIGN DATA WRAPPER dummy' => {
create_order => 85,
regexp => qr/^
\QGRANT ALL ON FOREIGN DATA WRAPPER dummy TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT USAGE ON FOREIGN SERVER s1' => {
create_order => 85,
regexp => qr/^
\QGRANT ALL ON FOREIGN SERVER s1 TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT USAGE ON DOMAIN dump_test.us_postal_code' => {
create_order => 72,
regexp => qr/^
\QGRANT ALL ON TYPE dump_test.us_postal_code TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'GRANT USAGE ON TYPE dump_test.int42' => {
create_order => 87,
regexp => qr/^
\QGRANT ALL ON TYPE dump_test.int42 TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'GRANT USAGE ON TYPE dump_test.planets - ENUM' => {
create_order => 66,
regexp => qr/^
\QGRANT ALL ON TYPE dump_test.planets TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'GRANT USAGE ON TYPE dump_test.textrange - RANGE' => {
create_order => 67,
- create_sql => 'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;',
+ create_sql =>
+'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON TYPE dump_test.textrange TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'GRANT CREATE ON DATABASE dump_test' => {
create_order => 48,
regexp => qr/^
\QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E
/xm,
- like => { pg_dumpall_dbprivs => 1, }, },
+ like => { pg_dumpall_dbprivs => 1, }, },
'GRANT SELECT ON TABLE test_table' => {
create_order => 5,
create_sql => 'GRANT SELECT ON TABLE dump_test.test_table
TO regress_dump_test_role;',
regexp =>
- qr/^GRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;/m,
+qr/^GRANT SELECT ON TABLE dump_test.test_table TO regress_dump_test_role;/m,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- section_pre_data => 1, },
+ only_dump_test_table => 1,
+ section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- no_privs => 1, }, },
+ exclude_test_table => 1,
+ no_privs => 1, }, },
'GRANT SELECT ON TABLE test_third_table' => {
create_order => 19,
qr/^GRANT SELECT ON TABLE dump_test_second_schema.test_third_table TO regress_dump_test_role;/m,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT ALL ON SEQUENCE test_third_table_col1_seq' => {
create_order => 28,
/xm,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT SELECT ON TABLE measurement' => {
create_order => 91,
TABLE dump_test.measurement
TO regress_dump_test_role;',
regexp =>
- qr/^GRANT SELECT ON TABLE dump_test.measurement TO regress_dump_test_role;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+qr/^GRANT SELECT ON TABLE dump_test.measurement TO regress_dump_test_role;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'GRANT SELECT ON TABLE measurement_y2006m2' => {
create_order => 92,
create_sql => 'GRANT SELECT ON
TABLE dump_test_second_schema.measurement_y2006m2
TO regress_dump_test_role;',
- regexp => qr/^GRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;/m,
+ regexp =>
+qr/^GRANT SELECT ON TABLE dump_test_second_schema.measurement_y2006m2 TO regress_dump_test_role;/m,
like => {
%full_runs,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT ALL ON LARGE OBJECT ...' => {
create_order => 60,
/xm,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1, },
+ column_inserts => 1,
+ data_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1, },
unlike => {
binary_upgrade => 1,
- no_blobs => 1,
- no_privs => 1,
- schema_only => 1, }, },
+ no_blobs => 1,
+ no_privs => 1,
+ schema_only => 1, }, },
'GRANT INSERT(col1) ON TABLE test_second_table' => {
create_order => 8,
regexp => qr/^
\QGRANT INSERT(col1) ON TABLE dump_test.test_second_table TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1, }, },
+ no_privs => 1, }, },
'GRANT EXECUTE ON FUNCTION pg_sleep() TO regress_dump_test_role' => {
create_order => 16,
regexp => qr/^
\QGRANT ALL ON FUNCTION pg_catalog.pg_sleep(double precision) TO regress_dump_test_role;\E
/xm,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT SELECT (proname ...) ON TABLE pg_proc TO public' => {
create_order => 46,
\QGRANT SELECT(probin) ON TABLE pg_catalog.pg_proc TO PUBLIC;\E\n.*
\QGRANT SELECT(proconfig) ON TABLE pg_catalog.pg_proc TO PUBLIC;\E\n.*
\QGRANT SELECT(proacl) ON TABLE pg_catalog.pg_proc TO PUBLIC;\E/xms,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'GRANT USAGE ON SCHEMA public TO public' => {
regexp => qr/^
\Q--\E\n\n
\QGRANT USAGE ON SCHEMA public TO PUBLIC;\E
/xm,
+
# this shouldn't ever get emitted anymore
like => {}, },
'REFRESH MATERIALIZED VIEW matview' => {
- regexp => qr/^REFRESH MATERIALIZED VIEW dump_test.matview;/m,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_post_data => 1, },
+ regexp => qr/^REFRESH MATERIALIZED VIEW dump_test.matview;/m,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
'REFRESH MATERIALIZED VIEW matview_second' => {
- regexp => qr/^
+ regexp => qr/^
\QREFRESH MATERIALIZED VIEW dump_test.matview;\E
\n.*
\QREFRESH MATERIALIZED VIEW dump_test.matview_second;\E
/xms,
- like => {
- %full_runs,
- %dump_test_schema_runs,
- section_post_data => 1, },
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
- binary_upgrade => 1,
+ binary_upgrade => 1,
exclude_dump_test_schema => 1,
- schema_only => 1, }, },
+ schema_only => 1, }, },
# FIXME
'REFRESH MATERIALIZED VIEW matview_third' => {
- regexp => qr/^
+ regexp => qr/^
\QREFRESH MATERIALIZED VIEW dump_test.matview_third;\E
/xms,
- like => {}, },
+ like => {}, },
# FIXME
'REFRESH MATERIALIZED VIEW matview_fourth' => {
- regexp => qr/^
+ regexp => qr/^
\QREFRESH MATERIALIZED VIEW dump_test.matview_fourth;\E
/xms,
- like => {}, },
+ like => {}, },
'REVOKE CONNECT ON DATABASE dump_test FROM public' => {
create_order => 49,
\QGRANT TEMPORARY ON DATABASE dump_test TO PUBLIC;\E\n
\QGRANT CREATE ON DATABASE dump_test TO regress_dump_test_role;\E
/xm,
- like => { pg_dumpall_dbprivs => 1, }, },
+ like => { pg_dumpall_dbprivs => 1, }, },
'REVOKE EXECUTE ON FUNCTION pg_sleep() FROM public' => {
create_order => 15,
regexp => qr/^
\QREVOKE ALL ON FUNCTION pg_catalog.pg_sleep(double precision) FROM PUBLIC;\E
/xm,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'REVOKE SELECT ON TABLE pg_proc FROM public' => {
create_order => 45,
create_sql => 'REVOKE SELECT ON TABLE pg_proc FROM public;',
- regexp => qr/^REVOKE SELECT ON TABLE pg_catalog.pg_proc FROM PUBLIC;/m,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ regexp =>
+ qr/^REVOKE SELECT ON TABLE pg_catalog.pg_proc FROM PUBLIC;/m,
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'REVOKE CREATE ON SCHEMA public FROM public' => {
create_order => 16,
\QREVOKE ALL ON SCHEMA public FROM PUBLIC;\E
\n\QGRANT USAGE ON SCHEMA public TO PUBLIC;\E
/xm,
- like => {
- %full_runs,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
'REVOKE USAGE ON LANGUAGE plpgsql FROM public' => {
create_order => 16,
like => {
%full_runs,
%dump_test_schema_runs,
- only_dump_test_table => 1,
- role => 1,
- section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ only_dump_test_table => 1,
+ role => 1,
+ section_pre_data => 1, },
+ unlike => { no_privs => 1, }, },
);
next;
}
- # If there is a like entry, but no unlike entry, then we will test the like case
- if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key}))
+# If there is a like entry, but no unlike entry, then we will test the like case
+ if ($tests{$test}->{like}->{$test_key}
+ && !defined($tests{$test}->{unlike}->{$test_key}))
{
$num_tests++;
}
# Run the test listed as a like, unless it is specifically noted
# as an unlike (generally due to an explicit exclusion or similar).
- if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key}))
+ if ($tests{$test}->{like}->{$test_key}
+ && !defined($tests{$test}->{unlike}->{$test_key}))
{
- if (!ok($output_file =~ $tests{$test}->{regexp}, "$run: should dump $test"))
+ if (!ok($output_file =~ $tests{$test}->{regexp},
+ "$run: should dump $test"))
{
diag("Review $run results in $tempdir");
}
}
else
{
- if (!ok(
- $output_file !~
- $tests{$test}->{regexp},
- "$run: should not dump $test"))
+ if (!ok($output_file !~ $tests{$test}->{regexp},
+ "$run: should not dump $test"))
{
diag("Review $run results in $tempdir");
}
$node->init;
command_like([ 'pg_resetwal', '-n', $node->data_dir ],
- qr/checkpoint/,
- 'pg_resetwal -n produces output');
+ qr/checkpoint/, 'pg_resetwal -n produces output');
# Permissions on PGDATA should be default
SKIP:
{
- skip "unix-style permissions not supported on Windows", 1 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 1
+ if ($windows_os);
ok(check_mode_recursive($node->data_dir, 0700, 0600),
'check PGDATA permissions');
$node->init;
my $pg_control = $node->data_dir . '/global/pg_control';
-my $size = (stat($pg_control))[7];
+my $size = (stat($pg_control))[7];
# Read out the head of the file to get PG_CONTROL_VERSION in
# particular.
print $fh pack("x[$size]");
close $fh;
-command_checks_all([ 'pg_resetwal', '-n', $node->data_dir ],
- 0,
- [ qr/pg_control version number/ ],
- [ qr/pg_resetwal: pg_control exists but is broken or wrong version; ignoring it/ ],
- 'processes corrupted pg_control all zeroes');
+command_checks_all(
+ [ 'pg_resetwal', '-n', $node->data_dir ],
+ 0,
+ [qr/pg_control version number/],
+ [
+qr/pg_resetwal: pg_control exists but is broken or wrong version; ignoring it/
+ ],
+ 'processes corrupted pg_control all zeroes');
# Put in the previously saved header data. This uses a different code
# path internally, allowing us to process a zero WAL segment size.
print $fh $data, pack("x[" . ($size - 16) . "]");
close $fh;
-command_checks_all([ 'pg_resetwal', '-n', $node->data_dir ],
- 0,
- [ qr/pg_control version number/ ],
- [ qr/\Qpg_resetwal: pg_control specifies invalid WAL segment size (0 bytes); proceed with caution\E/ ],
- 'processes zero WAL segment size');
+command_checks_all(
+ [ 'pg_resetwal', '-n', $node->data_dir ],
+ 0,
+ [qr/pg_control version number/],
+ [
+qr/\Qpg_resetwal: pg_control specifies invalid WAL segment size (0 bytes); proceed with caution\E/
+ ],
+ 'processes zero WAL segment size');
sub setup_cluster
{
- my $extra_name = shift; # Used to differentiate clusters
- my $extra = shift; # Extra params for initdb
+ my $extra_name = shift; # Used to differentiate clusters
+ my $extra = shift; # Extra params for initdb
# Initialize master, data checksums are mandatory
- $node_master = get_new_node('master' . ($extra_name ? "_${extra_name}" : ''));
- $node_master->init(
- allows_streaming => 1, extra => $extra);
+ $node_master =
+ get_new_node('master' . ($extra_name ? "_${extra_name}" : ''));
+ $node_master->init(allows_streaming => 1, extra => $extra);
+
# Set wal_keep_segments to prevent WAL segment recycling after enforced
# checkpoints in the tests.
- $node_master->append_conf('postgresql.conf', qq(
+ $node_master->append_conf(
+ 'postgresql.conf', qq(
wal_keep_segments = 20
));
}
{
my $extra_name = shift;
- $node_standby = get_new_node('standby' . ($extra_name ? "_${extra_name}" : ''));
+ $node_standby =
+ get_new_node('standby' . ($extra_name ? "_${extra_name}" : ''));
$node_master->backup('my_backup');
$node_standby->init_from_backup($node_master, 'my_backup');
my $connstr_master = $node_master->connstr();
"$tmp_folder/master-postgresql.conf.tmp",
"$master_pgdata/postgresql.conf");
- chmod($node_master->group_access() ? 0640 : 0600,
- "$master_pgdata/postgresql.conf")
- or BAIL_OUT(
- "unable to set permissions for $master_pgdata/postgresql.conf");
+ chmod(
+ $node_master->group_access() ? 0640 : 0600,
+ "$master_pgdata/postgresql.conf")
+ or BAIL_OUT(
+ "unable to set permissions for $master_pgdata/postgresql.conf");
# Plug-in rewound node to the now-promoted standby node
my $port_standby = $node_standby->port;
'tail-copy');
# Permissions on PGDATA should be default
- SKIP:
+ SKIP:
{
- skip "unix-style permissions not supported on Windows", 1 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 1
+ if ($windows_os);
ok(check_mode_recursive($node_master->data_dir(), 0700, 0600),
'check PGDATA permissions');
'database names');
# Permissions on PGDATA should have group permissions
- SKIP:
+ SKIP:
{
- skip "unix-style permissions not supported on Windows", 1 if ($windows_os);
+ skip "unix-style permissions not supported on Windows", 1
+ if ($windows_os);
ok(check_mode_recursive($node_master->data_dir(), 0750, 0640),
'check PGDATA permissions');
# Initialize pgbench tables scale 1
pgbench(
- '-i', 0, [qr{^$}],
- [ qr{creating tables}, qr{vacuuming}, qr{creating primary keys}, qr{done\.} ],
+ '-i', 0,
+ [qr{^$}],
+ [ qr{creating tables}, qr{vacuuming},
+ qr{creating primary keys}, qr{done\.} ],
'pgbench scale 1 initialization',);
# Again, with all possible options
# Test interaction of --init-steps with legacy step-selection options
pgbench(
- '--initialize --init-steps=dtpvgvv --no-vacuum --foreign-keys --unlogged-tables',
- 0, [qr{^$}],
+'--initialize --init-steps=dtpvgvv --no-vacuum --foreign-keys --unlogged-tables',
+ 0,
+ [qr{^$}],
[ qr{dropping old tables},
qr{creating tables},
qr{creating primary keys},
# test expressions
# command 1..3 and 23 depend on random seed which is used to call srandom.
pgbench(
- '--random-seed=5432 -t 1 -Dfoo=-10.1 -Dbla=false -Di=+3 -Dminint=-9223372036854775808 -Dn=null -Dt=t -Df=of -Dd=1.0',
+'--random-seed=5432 -t 1 -Dfoo=-10.1 -Dbla=false -Di=+3 -Dminint=-9223372036854775808 -Dn=null -Dt=t -Df=of -Dd=1.0',
0,
[ qr{type: .*/001_pgbench_expressions}, qr{processed: 1/1} ],
[ qr{setting random seed to 5432\b},
+
# After explicit seeding, the four * random checks (1-3,20) should be
# deterministic, but not necessarily portable.
- qr{command=1.: int 1\d\b}, # uniform random: 12 on linux
- qr{command=2.: int 1\d\d\b}, # exponential random: 106 on linux
- qr{command=3.: int 1\d\d\d\b}, # gaussian random: 1462 on linux
+ qr{command=1.: int 1\d\b}, # uniform random: 12 on linux
+ qr{command=2.: int 1\d\d\b}, # exponential random: 106 on linux
+ qr{command=3.: int 1\d\d\d\b}, # gaussian random: 1462 on linux
qr{command=4.: int 4\b},
qr{command=5.: int 5\b},
qr{command=6.: int 6\b},
qr{command=16.: double 16\b},
qr{command=17.: double 17\b},
qr{command=18.: int 9223372036854775807\b},
- qr{command=20.: int \d\b}, # zipfian random: 1 on linux
+ qr{command=20.: int \d\b}, # zipfian random: 1 on linux
qr{command=21.: double -27\b},
qr{command=22.: double 1024\b},
qr{command=23.: double 1\b},
qr{command=86.: int 86\b},
qr{command=93.: int 93\b},
qr{command=95.: int 0\b},
- qr{command=96.: int 1\b}, # :scale
- qr{command=97.: int 0\b}, # :client_id
- qr{command=98.: int 5432\b}, # :random_seed
+ qr{command=96.: int 1\b}, # :scale
+ qr{command=97.: int 0\b}, # :client_id
+ qr{command=98.: int 5432\b}, # :random_seed
],
'pgbench expressions',
{ '001_pgbench_expressions' => q{-- integer functions
# random determinism when seeded
$node->safe_psql('postgres',
- 'CREATE UNLOGGED TABLE seeded_random(seed INT8 NOT NULL, rand TEXT NOT NULL, val INTEGER NOT NULL);');
+'CREATE UNLOGGED TABLE seeded_random(seed INT8 NOT NULL, rand TEXT NOT NULL, val INTEGER NOT NULL);'
+);
# same value to check for determinism
my $seed = int(rand(1000000000));
for my $i (1, 2)
{
- pgbench("--random-seed=$seed -t 1",
- 0,
- [qr{processed: 1/1}],
- [qr{setting random seed to $seed\b}],
- "random seeded with $seed",
- { "001_pgbench_random_seed_$i" => q{-- test random functions
+ pgbench(
+ "--random-seed=$seed -t 1",
+ 0,
+ [qr{processed: 1/1}],
+ [qr{setting random seed to $seed\b}],
+ "random seeded with $seed",
+ { "001_pgbench_random_seed_$i" => q{-- test random functions
\set ur random(1000, 1999)
\set er random_exponential(2000, 2999, 2.0)
\set gr random_gaussian(3000, 3999, 3.0)
}
# check that all runs generated the same 4 values
-my ($ret, $out, $err) =
- $node->psql('postgres',
- 'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val');
+my ($ret, $out, $err) = $node->psql('postgres',
+'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'
+);
-ok($ret == 0, "psql seeded_random count ok");
+ok($ret == 0, "psql seeded_random count ok");
ok($err eq '', "psql seeded_random count stderr is empty");
-ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/, "psql seeded_random count uniform");
-ok($out =~ /\b$seed\|exponential\|2\d\d\d\|2/, "psql seeded_random count exponential");
-ok($out =~ /\b$seed\|gaussian\|3\d\d\d\|2/, "psql seeded_random count gaussian");
-ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/, "psql seeded_random count zipfian");
+ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/,
+ "psql seeded_random count uniform");
+ok( $out =~ /\b$seed\|exponential\|2\d\d\d\|2/,
+ "psql seeded_random count exponential");
+ok( $out =~ /\b$seed\|gaussian\|3\d\d\d\|2/,
+ "psql seeded_random count gaussian");
+ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/,
+ "psql seeded_random count zipfian");
$node->safe_psql('postgres', 'DROP TABLE seeded_random;');
# SQL
[ 'sql syntax error',
0,
- [ qr{ERROR: syntax error}, qr{prepared statement .* does not exist}
- ],
+ [ qr{ERROR: syntax error},
+ qr{prepared statement .* does not exist} ],
q{-- SQL syntax error
SELECT 1 + ;
} ],
} ],
# SHELL
- [ 'shell bad command', 0,
+ [ 'shell bad command', 0,
[qr{\(shell\) .* meta-command failed}], q{\shell no-such-command} ],
[ 'shell undefined variable', 0,
[qr{undefined variable ":nosuchvariable"}],
0,
[qr{exponential parameter must be greater }],
q{\set i random_exponential(0, 10, 0.0)} ],
- [ 'set zipfian param to 1',
+ [ 'set zipfian param to 1',
0,
[qr{zipfian parameter must be in range \(0, 1\) U \(1, \d+\]}],
q{\set i random_zipfian(0, 10, 1)} ],
- [ 'set zipfian param too large',
+ [ 'set zipfian param too large',
0,
[qr{zipfian parameter must be in range \(0, 1\) U \(1, \d+\]}],
q{\set i random_zipfian(0, 10, 1000000)} ],
[ 'set non numeric value', 0,
[qr{malformed variable "foo" value: "bla"}], q{\set i :foo + 1} ],
- [ 'set no expression',
- 1,
- [qr{syntax error}],
- q{\set i} ],
- [ 'set missing argument',
+ [ 'set no expression', 1, [qr{syntax error}], q{\set i} ],
+ [ 'set missing argument', 1, [qr{missing argument}i], q{\set} ],
+ [ 'set not a bool', 0,
+ [qr{cannot coerce double to boolean}], q{\set b NOT 0.0} ],
+ [ 'set not an int', 0,
+ [qr{cannot coerce boolean to int}], q{\set i TRUE + 2} ],
+ [ 'set not a double', 0,
+ [qr{cannot coerce boolean to double}], q{\set d ln(TRUE)} ],
+ [ 'set case error',
1,
- [qr{missing argument}i],
- q{\set} ],
- [ 'set not a bool',
- 0,
- [ qr{cannot coerce double to boolean} ],
- q{\set b NOT 0.0} ],
- [ 'set not an int',
- 0,
- [ qr{cannot coerce boolean to int} ],
- q{\set i TRUE + 2} ],
- [ 'set not a double',
- 0,
- [ qr{cannot coerce boolean to double} ],
- q{\set d ln(TRUE)} ],
- [ 'set case error',
- 1,
- [ qr{syntax error in command "set"} ],
+ [qr{syntax error in command "set"}],
q{\set i CASE TRUE THEN 1 ELSE 0 END} ],
- [ 'set random error',
- 0,
- [ qr{cannot coerce boolean to int} ],
- q{\set b random(FALSE, TRUE)} ],
- [ 'set number of args mismatch',
- 1,
- [ qr{unexpected number of arguments} ],
- q{\set d ln(1.0, 2.0))} ],
- [ 'set at least one arg',
- 1,
- [ qr{at least one argument expected} ],
- q{\set i greatest())} ],
+ [ 'set random error', 0,
+ [qr{cannot coerce boolean to int}], q{\set b random(FALSE, TRUE)} ],
+ [ 'set number of args mismatch', 1,
+ [qr{unexpected number of arguments}], q{\set d ln(1.0, 2.0))} ],
+ [ 'set at least one arg', 1,
+ [qr{at least one argument expected}], q{\set i greatest())} ],
# SETSHELL
[ 'setshell not an int', 0,
[ 'misc invalid backslash command', 1,
[qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand} ],
[ 'misc empty script', 1, [qr{empty command list for script}], q{} ],
- [ 'bad boolean', 0, [qr{malformed variable.*trueXXX}], q{\set b :badtrue or true} ],
- );
+ [ 'bad boolean', 0,
+ [qr{malformed variable.*trueXXX}], q{\set b :badtrue or true} ],);
for my $e (@errors)
my $n = '001_pgbench_error_' . $name;
$n =~ s/ /_/g;
pgbench(
- '-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared',
+'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX -M prepared',
$status,
[ $status ? qr{^$} : qr{processed: 0/1} ],
$re,
pgbench(
'-t 1', 0,
[ qr{processed: 1/1}, qr{zipfian cache array overflowed 1 time\(s\)} ],
- [ qr{^} ],
+ [qr{^}],
'pgbench zipfian array overflow on random_zipfian',
{ '001_pgbench_random_zipfian' => q{
\set i random_zipfian(1, 100, 0.5)
my $testdir = "$TestLib::tmp_check/t_${testname}_stuff";
mkdir $testdir
- or
- BAIL_OUT("could not create test directory \"${testdir}\": $!");
+ or BAIL_OUT("could not create test directory \"${testdir}\": $!");
# invoke pgbench
sub pgbench
for my $fn (sort keys %$files)
{
my $filename = $testdir . '/' . $fn;
+
# cleanup file weight if any
$filename =~ s/\@\d+$//;
+
# cleanup from prior runs
unlink $filename;
append_to_file($filename, $$files{$fn});
[ 'ambiguous builtin', '-b s', [qr{ambiguous}] ],
[ '--progress-timestamp => --progress', '--progress-timestamp',
[qr{allowed only under}] ],
- [ '-I without init option', '-I dtg',
+ [ '-I without init option',
+ '-I dtg',
[qr{cannot be used in benchmarking mode}] ],
- [ 'invalid init step', '-i -I dta',
- [qr{unrecognized initialization step},
- qr{allowed steps are} ] ],
- [ 'bad random seed', '--random-seed=one',
- [qr{unrecognized random seed option "one": expecting an unsigned integer, "time" or "rand"},
- qr{error while setting random seed from --random-seed option} ] ],
+ [ 'invalid init step',
+ '-i -I dta',
+ [ qr{unrecognized initialization step}, qr{allowed steps are} ] ],
+ [ 'bad random seed',
+ '--random-seed=one',
+ [
+qr{unrecognized random seed option "one": expecting an unsigned integer, "time" or "rand"},
+ qr{error while setting random seed from --random-seed option} ] ],
# loging sub-options
[ 'sampling => log', '--sampling-rate=0.01',
'pgbench builtin list');
my @script_tests = (
+
# name, err, { file => contents }
- [ 'missing endif', [qr{\\if without matching \\endif}], {'if-noendif.sql' => '\if 1'} ],
- [ 'missing if on elif', [qr{\\elif without matching \\if}], {'elif-noif.sql' => '\elif 1'} ],
- [ 'missing if on else', [qr{\\else without matching \\if}], {'else-noif.sql' => '\else'} ],
- [ 'missing if on endif', [qr{\\endif without matching \\if}], {'endif-noif.sql' => '\endif'} ],
- [ 'elif after else', [qr{\\elif after \\else}], {'else-elif.sql' => "\\if 1\n\\else\n\\elif 0\n\\endif"} ],
- [ 'else after else', [qr{\\else after \\else}], {'else-else.sql' => "\\if 1\n\\else\n\\else\n\\endif"} ],
- [ 'if syntax error', [qr{syntax error in command "if"}], {'if-bad.sql' => "\\if\n\\endif\n"} ],
- [ 'elif syntax error', [qr{syntax error in command "elif"}], {'elif-bad.sql' => "\\if 0\n\\elif +\n\\endif\n"} ],
- [ 'else syntax error', [qr{unexpected argument in command "else"}], {'else-bad.sql' => "\\if 0\n\\else BAD\n\\endif\n"} ],
- [ 'endif syntax error', [qr{unexpected argument in command "endif"}], {'endif-bad.sql' => "\\if 0\n\\endif BAD\n"} ],
-);
+ [ 'missing endif',
+ [qr{\\if without matching \\endif}],
+ { 'if-noendif.sql' => '\if 1' } ],
+ [ 'missing if on elif',
+ [qr{\\elif without matching \\if}],
+ { 'elif-noif.sql' => '\elif 1' } ],
+ [ 'missing if on else',
+ [qr{\\else without matching \\if}],
+ { 'else-noif.sql' => '\else' } ],
+ [ 'missing if on endif',
+ [qr{\\endif without matching \\if}],
+ { 'endif-noif.sql' => '\endif' } ],
+ [ 'elif after else',
+ [qr{\\elif after \\else}],
+ { 'else-elif.sql' => "\\if 1\n\\else\n\\elif 0\n\\endif" } ],
+ [ 'else after else',
+ [qr{\\else after \\else}],
+ { 'else-else.sql' => "\\if 1\n\\else\n\\else\n\\endif" } ],
+ [ 'if syntax error',
+ [qr{syntax error in command "if"}],
+ { 'if-bad.sql' => "\\if\n\\endif\n" } ],
+ [ 'elif syntax error',
+ [qr{syntax error in command "elif"}],
+ { 'elif-bad.sql' => "\\if 0\n\\elif +\n\\endif\n" } ],
+ [ 'else syntax error',
+ [qr{unexpected argument in command "else"}],
+ { 'else-bad.sql' => "\\if 0\n\\else BAD\n\\endif\n" } ],
+ [ 'endif syntax error',
+ [qr{unexpected argument in command "endif"}],
+ { 'endif-bad.sql' => "\\if 0\n\\endif BAD\n" } ],);
for my $t (@script_tests)
{
my ($name, $err, $files) = @$t;
- pgbench_scripts('', 1, [qr{^$}], $err, 'pgbench option error: ' . $name, $files);
+ pgbench_scripts('', 1, [qr{^$}], $err, 'pgbench option error: ' . $name,
+ $files);
}
done_testing();
$node->start;
# use a long timeout for the benefit of very slow buildfarm machines
-$node->command_ok([qw(pg_isready --timeout=60)], 'succeeds with server running');
+$node->command_ok([qw(pg_isready --timeout=60)],
+ 'succeeds with server running');
$node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)],
'vacuumdb with connection string');
-$node->command_fails([qw(vacuumdb -Zt pg_am;ABORT postgres)],
+$node->command_fails(
+ [qw(vacuumdb -Zt pg_am;ABORT postgres)],
'trailing command in "-t", without COLUMNS');
+
# Unwanted; better if it failed.
-$node->command_ok([qw(vacuumdb -Zt pg_am(amname);ABORT postgres)],
+$node->command_ok(
+ [qw(vacuumdb -Zt pg_am(amname);ABORT postgres)],
'trailing command in "-t", with COLUMNS');
-$node->safe_psql('postgres', q|
+$node->safe_psql(
+ 'postgres', q|
CREATE TABLE "need""q(uot" (")x" text);
CREATE FUNCTION f0(int) RETURNS int LANGUAGE SQL AS 'SELECT $1 * $1';
|);
$node->command_ok([qw|vacuumdb -Z --table="need""q(uot"(")x") postgres|],
'column list');
-$node->command_fails([qw|vacuumdb -Zt funcidx postgres|],
+$node->command_fails(
+ [qw|vacuumdb -Zt funcidx postgres|],
'unqualifed name via functional index');
next if /^CATALOG\(.*BKI_BOOTSTRAP/;
next
unless /\boid *=> *'(\d+)'/
- || /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+),/
- || /^CATALOG\([^,]*, *(\d+)/
- || /^DECLARE_INDEX\([^,]*, *(\d+)/
- || /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/
- || /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/;
+ || /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+),/
+ || /^CATALOG\([^,]*, *(\d+)/
+ || /^DECLARE_INDEX\([^,]*, *(\d+)/
+ || /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/
+ || /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/;
$oidcounts{$1}++;
$oidcounts{$2}++ if $2;
}
my $header = "$1.h";
die "There in no header file corresponding to $datfile"
- if ! -e $header;
+ if !-e $header;
my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname};
{
my $attname = $column->{name};
die "strip_default_values: $catname.$attname undefined\n"
- if ! defined $row->{$attname};
+ if !defined $row->{$attname};
# Delete values that match defaults.
if (defined $column->{default}
}
# Also delete pg_proc.pronargs, since that can be recomputed.
- if ($catname eq 'pg_proc' && $attname eq 'pronargs' &&
- defined($row->{proargtypes}))
+ if ( $catname eq 'pg_proc'
+ && $attname eq 'pronargs'
+ && defined($row->{proargtypes}))
{
delete $row->{$attname};
}
# data files.
sub format_hash
{
- my $data = shift;
+ my $data = shift;
my @orig_attnames = @_;
# Copy attname to new array if it has a value, so we can determine
my $char_count = 1;
my $threshold;
- my $hash_str = '';
+ my $hash_str = '';
my $element_count = 0;
foreach my $attname (@attnames)
# Include a leading space in the key-value pair, since this will
# always go after either a comma or an additional padding space on
# the next line.
- my $element = " $attname => '$value'";
+ my $element = " $attname => '$value'";
my $element_length = length($element);
# If adding the element to the current line would expand the line
if ($^O eq 'darwin')
{
- $krb5_bin_dir = '/usr/local/opt/krb5/bin';
+ $krb5_bin_dir = '/usr/local/opt/krb5/bin';
$krb5_sbin_dir = '/usr/local/opt/krb5/sbin';
}
elsif ($^O eq 'freebsd')
{
- $krb5_bin_dir = '/usr/local/bin';
+ $krb5_bin_dir = '/usr/local/bin';
$krb5_sbin_dir = '/usr/local/sbin';
}
elsif ($^O eq 'linux')
$krb5_sbin_dir = '/usr/sbin';
}
-my $krb5_config = 'krb5-config';
-my $kinit = 'kinit';
-my $kdb5_util = 'kdb5_util';
+my $krb5_config = 'krb5-config';
+my $kinit = 'kinit';
+my $kdb5_util = 'kdb5_util';
my $kadmin_local = 'kadmin.local';
-my $krb5kdc = 'krb5kdc';
+my $krb5kdc = 'krb5kdc';
if ($krb5_bin_dir && -d $krb5_bin_dir)
{
$krb5_config = $krb5_bin_dir . '/' . $krb5_config;
- $kinit = $krb5_bin_dir . '/' . $kinit;
+ $kinit = $krb5_bin_dir . '/' . $kinit;
}
if ($krb5_sbin_dir && -d $krb5_sbin_dir)
{
- $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util;
+ $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util;
$kadmin_local = $krb5_sbin_dir . '/' . $kadmin_local;
- $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc;
+ $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc;
}
my $realm = 'EXAMPLE.COM';
-my $krb5_conf = "${TestLib::tmp_check}/krb5.conf";
-my $kdc_conf = "${TestLib::tmp_check}/kdc.conf";
-my $krb5_log = "${TestLib::tmp_check}/krb5libs.log";
-my $kdc_log = "${TestLib::tmp_check}/krb5kdc.log";
-my $kdc_port = int(rand() * 16384) + 49152;
+my $krb5_conf = "${TestLib::tmp_check}/krb5.conf";
+my $kdc_conf = "${TestLib::tmp_check}/kdc.conf";
+my $krb5_log = "${TestLib::tmp_check}/krb5libs.log";
+my $kdc_log = "${TestLib::tmp_check}/krb5kdc.log";
+my $kdc_port = int(rand() * 16384) + 49152;
my $kdc_datadir = "${TestLib::tmp_check}/krb5kdc";
my $kdc_pidfile = "${TestLib::tmp_check}/krb5kdc.pid";
-my $keytab = "${TestLib::tmp_check}/krb5.keytab";
+my $keytab = "${TestLib::tmp_check}/krb5.keytab";
note "setting up Kerberos";
my ($stdout, $krb5_version);
-run_log [ $krb5_config, '--version' ], '>', \$stdout or BAIL_OUT("could not execute krb5-config");
+run_log [ $krb5_config, '--version' ], '>', \$stdout
+ or BAIL_OUT("could not execute krb5-config");
BAIL_OUT("Heimdal is not supported") if $stdout =~ m/heimdal/;
-$stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/ or BAIL_OUT("could not get Kerberos version");
+$stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/
+ or BAIL_OUT("could not get Kerberos version");
$krb5_version = $1;
-append_to_file($krb5_conf,
-qq![logging]
+append_to_file(
+ $krb5_conf,
+ qq![logging]
default = FILE:$krb5_log
kdc = FILE:$kdc_log
kdc = localhost:$kdc_port
}!);
-append_to_file($kdc_conf,
-qq![kdcdefaults]
+append_to_file(
+ $kdc_conf,
+ qq![kdcdefaults]
!);
+
# For new-enough versions of krb5, use the _listen settings rather
# than the _ports settings so that we can bind to localhost only.
if ($krb5_version >= 1.15)
{
- append_to_file($kdc_conf,
-qq!kdc_listen = localhost:$kdc_port
+ append_to_file(
+ $kdc_conf,
+ qq!kdc_listen = localhost:$kdc_port
kdc_tcp_listen = localhost:$kdc_port
!);
}
else
{
- append_to_file($kdc_conf,
-qq!kdc_ports = $kdc_port
+ append_to_file(
+ $kdc_conf,
+ qq!kdc_ports = $kdc_port
kdc_tcp_ports = $kdc_port
!);
}
-append_to_file($kdc_conf,
-qq!
+append_to_file(
+ $kdc_conf,
+ qq!
[realms]
$realm = {
database_name = $kdc_datadir/principal
mkdir $kdc_datadir or die;
-$ENV{'KRB5_CONFIG'} = $krb5_conf;
+$ENV{'KRB5_CONFIG'} = $krb5_conf;
$ENV{'KRB5_KDC_PROFILE'} = $kdc_conf;
my $service_principal = "$ENV{with_krb_srvnam}/localhost";
END
{
- kill 'INT', `cat $kdc_pidfile` if -f $kdc_pidfile;
+ kill 'INT', `cat $kdc_pidfile` if -f $kdc_pidfile;
}
note "setting up PostgreSQL instance";
my ($node, $role, $expected_res, $test_name) = @_;
# need to connect over TCP/IP for Kerberos
- my $res = $node->psql('postgres', 'SELECT 1',
- extra_params => [ '-d', $node->connstr('postgres').' host=localhost',
- '-U', $role ]);
+ my $res = $node->psql(
+ 'postgres',
+ 'SELECT 1',
+ extra_params => [
+ '-d', $node->connstr('postgres') . ' host=localhost',
+ '-U', $role ]);
is($res, $expected_res, $test_name);
}
truncate($node->data_dir . '/pg_ident.conf', 0);
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{host all all localhost gss include_realm=0});
+$node->append_conf('pg_hba.conf',
+ qq{host all all localhost gss include_realm=0});
$node->restart;
test_access($node, 'test1', 0, 'succeeds with include_realm=0');
my ($slapd, $ldap_bin_dir, $ldap_schema_dir);
-$ldap_bin_dir = undef; # usually in PATH
+$ldap_bin_dir = undef; # usually in PATH
if ($^O eq 'darwin')
{
- $slapd = '/usr/local/opt/openldap/libexec/slapd';
+ $slapd = '/usr/local/opt/openldap/libexec/slapd';
$ldap_schema_dir = '/usr/local/etc/openldap/schema';
}
elsif ($^O eq 'linux')
{
- $slapd = '/usr/sbin/slapd';
+ $slapd = '/usr/sbin/slapd';
$ldap_schema_dir = '/etc/ldap/schema' if -d '/etc/ldap/schema';
$ldap_schema_dir = '/etc/openldap/schema' if -d '/etc/openldap/schema';
}
elsif ($^O eq 'freebsd')
{
- $slapd = '/usr/local/libexec/slapd';
+ $slapd = '/usr/local/libexec/slapd';
$ldap_schema_dir = '/usr/local/etc/openldap/schema';
}
$ENV{PATH} = "$ldap_bin_dir:$ENV{PATH}" if $ldap_bin_dir;
-my $ldap_datadir = "${TestLib::tmp_check}/openldap-data";
-my $slapd_certs = "${TestLib::tmp_check}/slapd-certs";
-my $slapd_conf = "${TestLib::tmp_check}/slapd.conf";
+my $ldap_datadir = "${TestLib::tmp_check}/openldap-data";
+my $slapd_certs = "${TestLib::tmp_check}/slapd-certs";
+my $slapd_conf = "${TestLib::tmp_check}/slapd.conf";
my $slapd_pidfile = "${TestLib::tmp_check}/slapd.pid";
my $slapd_logfile = "${TestLib::tmp_check}/slapd.log";
-my $ldap_conf = "${TestLib::tmp_check}/ldap.conf";
-my $ldap_server = 'localhost';
-my $ldap_port = int(rand() * 16384) + 49152;
-my $ldaps_port = $ldap_port + 1;
-my $ldap_url = "ldap://$ldap_server:$ldap_port";
-my $ldaps_url = "ldaps://$ldap_server:$ldaps_port";
-my $ldap_basedn = 'dc=example,dc=net';
-my $ldap_rootdn = 'cn=Manager,dc=example,dc=net';
-my $ldap_rootpw = 'secret';
-my $ldap_pwfile = "${TestLib::tmp_check}/ldappassword";
+my $ldap_conf = "${TestLib::tmp_check}/ldap.conf";
+my $ldap_server = 'localhost';
+my $ldap_port = int(rand() * 16384) + 49152;
+my $ldaps_port = $ldap_port + 1;
+my $ldap_url = "ldap://$ldap_server:$ldap_port";
+my $ldaps_url = "ldaps://$ldap_server:$ldaps_port";
+my $ldap_basedn = 'dc=example,dc=net';
+my $ldap_rootdn = 'cn=Manager,dc=example,dc=net';
+my $ldap_rootpw = 'secret';
+my $ldap_pwfile = "${TestLib::tmp_check}/ldappassword";
note "setting up slapd";
-append_to_file($slapd_conf,
-qq{include $ldap_schema_dir/core.schema
+append_to_file(
+ $slapd_conf,
+ qq{include $ldap_schema_dir/core.schema
include $ldap_schema_dir/cosine.schema
include $ldap_schema_dir/nis.schema
include $ldap_schema_dir/inetorgperson.schema
rootpw $ldap_rootpw});
# don't bother to check the server's cert (though perhaps we should)
-append_to_file($ldap_conf,
-qq{TLS_REQCERT never
+append_to_file(
+ $ldap_conf,
+ qq{TLS_REQCERT never
});
mkdir $ldap_datadir or die;
-mkdir $slapd_certs or die;
-
-system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", "$slapd_certs/ca.key", "-x509", "-out", "$slapd_certs/ca.crt", "-subj", "/cn=CA";
-system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", "$slapd_certs/server.key", "-out", "$slapd_certs/server.csr", "-subj", "/cn=server";
-system_or_bail "openssl", "x509", "-req", "-in", "$slapd_certs/server.csr", "-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key", "-CAcreateserial", "-out", "$slapd_certs/server.crt";
+mkdir $slapd_certs or die;
+
+system_or_bail "openssl", "req", "-new", "-nodes", "-keyout",
+ "$slapd_certs/ca.key", "-x509", "-out", "$slapd_certs/ca.crt", "-subj",
+ "/cn=CA";
+system_or_bail "openssl", "req", "-new", "-nodes", "-keyout",
+ "$slapd_certs/server.key", "-out", "$slapd_certs/server.csr", "-subj",
+ "/cn=server";
+system_or_bail "openssl", "x509", "-req", "-in", "$slapd_certs/server.csr",
+ "-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key",
+ "-CAcreateserial", "-out", "$slapd_certs/server.crt";
system_or_bail $slapd, '-f', $slapd_conf, '-h', "$ldap_url $ldaps_url";
append_to_file($ldap_pwfile, $ldap_rootpw);
chmod 0600, $ldap_pwfile or die;
-$ENV{'LDAPURI'} = $ldap_url;
+$ENV{'LDAPURI'} = $ldap_url;
$ENV{'LDAPBINDDN'} = $ldap_rootdn;
-$ENV{'LDAPCONF'} = $ldap_conf;
+$ENV{'LDAPCONF'} = $ldap_conf;
note "loading LDAP data";
-system_or_bail 'ldapadd', '-x', '-y', $ldap_pwfile, '-f', 'authdata.ldif';
-system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret1', 'uid=test1,dc=example,dc=net';
-system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret2', 'uid=test2,dc=example,dc=net';
+system_or_bail 'ldapadd', '-x', '-y', $ldap_pwfile, '-f', 'authdata.ldif';
+system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret1',
+ 'uid=test1,dc=example,dc=net';
+system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret2',
+ 'uid=test2,dc=example,dc=net';
note "setting up PostgreSQL instance";
{
my ($node, $role, $expected_res, $test_name) = @_;
- my $res = $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
- is($res, $expected_res, $test_name);
+ my $res =
+ $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
+ is($res, $expected_res, $test_name);
}
note "simple bind";
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="uid=" ldapsuffix=",dc=example,dc=net"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="uid=" ldapsuffix=",dc=example,dc=net"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'wrong';
-test_access($node, 'test0', 2, 'simple bind authentication fails if user not found in LDAP');
-test_access($node, 'test1', 2, 'simple bind authentication fails with wrong password');
+test_access($node, 'test0', 2,
+ 'simple bind authentication fails if user not found in LDAP');
+test_access($node, 'test1', 2,
+ 'simple bind authentication fails with wrong password');
$ENV{"PGPASSWORD"} = 'secret1';
test_access($node, 'test1', 0, 'simple bind authentication succeeds');
note "search+bind";
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'wrong';
-test_access($node, 'test0', 2, 'search+bind authentication fails if user not found in LDAP');
-test_access($node, 'test1', 2, 'search+bind authentication fails with wrong password');
+test_access($node, 'test0', 2,
+ 'search+bind authentication fails if user not found in LDAP');
+test_access($node, 'test1', 2,
+ 'search+bind authentication fails with wrong password');
$ENV{"PGPASSWORD"} = 'secret1';
test_access($node, 'test1', 0, 'search+bind authentication succeeds');
note "LDAP URLs";
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"});
+$node->append_conf('pg_hba.conf',
+ qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"});
$node->restart;
$ENV{"PGPASSWORD"} = 'wrong';
-test_access($node, 'test0', 2, 'search+bind with LDAP URL authentication fails if user not found in LDAP');
-test_access($node, 'test1', 2, 'search+bind with LDAP URL authentication fails with wrong password');
+test_access($node, 'test0', 2,
+ 'search+bind with LDAP URL authentication fails if user not found in LDAP'
+);
+test_access($node, 'test1', 2,
+ 'search+bind with LDAP URL authentication fails with wrong password');
$ENV{"PGPASSWORD"} = 'secret1';
-test_access($node, 'test1', 0, 'search+bind with LDAP URL authentication succeeds');
+test_access($node, 'test1', 0,
+ 'search+bind with LDAP URL authentication succeeds');
note "search filters";
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
note "search filters in LDAP URLs";
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub?(|(uid=\$username)(mail=\$username))"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub?(|(uid=\$username)(mail=\$username))"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
# settings. ldapurl is always parsed first, then the other settings
# override. It might be useful in a case like this.
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
# note bad ldapprefix with a question mark that triggers a diagnostic message
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="?uid=" ldapsuffix=""});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="?uid=" ldapsuffix=""}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
# request StartTLS with ldaptls=1
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)" ldaptls=1});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)" ldaptls=1}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
# request LDAPS with ldapscheme=ldaps
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapserver=$ldap_server ldapscheme=ldaps ldapport=$ldaps_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapserver=$ldap_server ldapscheme=ldaps ldapport=$ldaps_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
# request LDAPS with ldapurl=ldaps://...
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)"});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)"}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
# bad combination of LDAPS and StartTLS
unlink($node->data_dir . '/pg_hba.conf');
-$node->append_conf('pg_hba.conf', qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)" ldaptls=1});
+$node->append_conf('pg_hba.conf',
+qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)" ldaptls=1}
+);
$node->restart;
$ENV{"PGPASSWORD"} = 'secret1';
$node->safe_psql('postgres', 'create extension pageinspect');
# Create a table with an autosummarizing BRIN index
-$node->safe_psql('postgres',
+$node->safe_psql(
+ 'postgres',
'create table brin_wi (a int) with (fillfactor = 10);
create index brin_wi_idx on brin_wi using brin (a) with (pages_per_range=1, autosummarize=on);
'
);
my $count = $node->safe_psql('postgres',
- "select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
+"select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
);
is($count, '1', "initial index state is correct");
$node->safe_psql('postgres',
'insert into brin_wi select * from generate_series(1, 100)');
-$node->poll_query_until('postgres',
- "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)",
+$node->poll_query_until(
+ 'postgres',
+"select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)",
't');
$count = $node->safe_psql('postgres',
- "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
+"select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
);
is($count, 't', "index got summarized");
$node->stop;
'--no-sync',
"--file=$tempdir/createdb.sql",
'-C',
- '-R', # no-op, just for testing
+ '-R', # no-op, just for testing
'postgres', ], },
data_only => {
dump_cmd => [
'--no-sync',
"--file=$tempdir/data_only.sql",
'-a',
- '-v', # no-op, just make sure it works
+ '-v', # no-op, just make sure it works
'postgres', ], },
defaults => {
dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ],
# Tests which are considered 'full' dumps by pg_dump, but there
# are flags used to exclude specific items (ACLs, blobs, etc).
my %full_runs = (
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- no_privs => 1,
- no_owner => 1,
-);
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ no_privs => 1,
+ no_owner => 1,);
my %tests = (
'ALTER EXTENSION test_pg_dump' => {
\n\s+\Qcol1 integer NOT NULL,\E
\n\s+\Qcol2 integer\E
\n\);\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE EXTENSION test_pg_dump' => {
create_order => 2,
\n/xm,
like => {
%full_runs,
- schema_only => 1,
+ schema_only => 1,
section_pre_data => 1, },
- unlike => {
- binary_upgrade => 1, }, },
+ unlike => { binary_upgrade => 1, }, },
'CREATE ROLE regress_dump_test_role' => {
create_order => 1,
\n\s+\QNO MAXVALUE\E
\n\s+\QCACHE 1;\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE TABLE regress_pg_dump_table_added' => {
create_order => 7,
\n\s+\Qcol1 integer NOT NULL,\E
\n\s+\Qcol2 integer\E
\n\);\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE SEQUENCE regress_pg_dump_seq' => {
regexp => qr/^
\n\s+\QNO MAXVALUE\E
\n\s+\QCACHE 1;\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'SETVAL SEQUENCE regress_seq_dumpable' => {
create_order => 6,
\n/xm,
like => {
%full_runs,
- data_only => 1,
+ data_only => 1,
section_data => 1, }, },
'CREATE TABLE regress_pg_dump_table' => {
\n\s+\Qcol1 integer NOT NULL,\E
\n\s+\Qcol2 integer\E
\n\);\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE ACCESS METHOD regress_test_am' => {
regexp => qr/^
\QCREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'COMMENT ON EXTENSION test_pg_dump' => {
regexp => qr/^
regexp => qr/^
\QGRANT SELECT ON TABLE public.regress_pg_dump_table_added TO regress_dump_test_role;\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'REVOKE SELECT regress_pg_dump_table_added post-ALTER EXTENSION' => {
create_order => 10,
%full_runs,
schema_only => 1,
section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ unlike => { no_privs => 1, }, },
'GRANT SELECT ON TABLE regress_pg_dump_table' => {
regexp => qr/^
\QGRANT SELECT ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'GRANT SELECT(col1) ON regress_pg_dump_table' => {
regexp => qr/^
\QGRANT SELECT(col1) ON TABLE public.regress_pg_dump_table TO PUBLIC;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
- 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' =>
- { create_order => 4,
+ 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role'
+ => {
+ create_order => 4,
create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
TO regress_dump_test_role;',
regexp => qr/^
%full_runs,
schema_only => 1,
section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ unlike => { no_privs => 1, }, },
'GRANT USAGE ON regress_pg_dump_table_col1_seq TO regress_dump_test_role'
=> {
%full_runs,
schema_only => 1,
section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ unlike => { no_privs => 1, }, },
'GRANT USAGE ON regress_pg_dump_seq TO regress_dump_test_role' => {
regexp => qr/^
\QGRANT USAGE ON SEQUENCE public.regress_pg_dump_seq TO regress_dump_test_role;\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'REVOKE SELECT(col1) ON regress_pg_dump_table' => {
create_order => 3,
%full_runs,
schema_only => 1,
section_pre_data => 1, },
- unlike => {
- no_privs => 1, }, },
+ unlike => { no_privs => 1, }, },
# Objects included in extension part of a schema created by this extension */
'CREATE TABLE regress_pg_dump_schema.test_table' => {
\n\s+\Qcol1 integer,\E
\n\s+\Qcol2 integer\E
\n\);\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'GRANT SELECT ON regress_pg_dump_schema.test_table' => {
regexp => qr/^
\QGRANT SELECT ON TABLE regress_pg_dump_schema.test_table TO regress_dump_test_role;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE SEQUENCE regress_pg_dump_schema.test_seq' => {
regexp => qr/^
\n\s+\QNO MAXVALUE\E
\n\s+\QCACHE 1;\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'GRANT USAGE ON regress_pg_dump_schema.test_seq' => {
regexp => qr/^
\QGRANT USAGE ON SEQUENCE regress_pg_dump_schema.test_seq TO regress_dump_test_role;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE TYPE regress_pg_dump_schema.test_type' => {
regexp => qr/^
\QCREATE TYPE regress_pg_dump_schema.test_type AS (\E
\n\s+\Qcol1 integer\E
\n\);\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'GRANT USAGE ON regress_pg_dump_schema.test_type' => {
regexp => qr/^
\QGRANT ALL ON TYPE regress_pg_dump_schema.test_type TO regress_dump_test_role;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE FUNCTION regress_pg_dump_schema.test_func' => {
regexp => qr/^
\QCREATE FUNCTION regress_pg_dump_schema.test_func() RETURNS integer\E
\n\s+\QLANGUAGE sql\E
\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'GRANT ALL ON regress_pg_dump_schema.test_func' => {
regexp => qr/^
\QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_func() TO regress_dump_test_role;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'CREATE AGGREGATE regress_pg_dump_schema.test_agg' => {
regexp => qr/^
\n\s+\QSFUNC = int2_sum,\E
\n\s+\QSTYPE = bigint\E
\n\);\n/xm,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
'GRANT ALL ON regress_pg_dump_schema.test_agg' => {
regexp => qr/^
\QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_agg(smallint) TO regress_dump_test_role;\E\n
\QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E
\n/xms,
- like => { binary_upgrade => 1, }, },
+ like => { binary_upgrade => 1, }, },
# Objects not included in extension, part of schema created by extension
'CREATE TABLE regress_pg_dump_schema.external_tab' => {
\n\);\n/xm,
like => {
%full_runs,
- schema_only => 1,
- section_pre_data => 1, }, },
-);
+ schema_only => 1,
+ section_pre_data => 1, }, },);
#########################################
# Create a PG instance to test actually dumping from
# Then count all the tests run against each run
foreach my $test (sort keys %tests)
{
- # If there is a like entry, but no unlike entry, then we will test the like case
- if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key}))
+# If there is a like entry, but no unlike entry, then we will test the like case
+ if ($tests{$test}->{like}->{$test_key}
+ && !defined($tests{$test}->{unlike}->{$test_key}))
{
$num_tests++;
}
{
# Run the test listed as a like, unless it is specifically noted
# as an unlike (generally due to an explicit exclusion or similar).
- if ($tests{$test}->{like}->{$test_key} && !defined($tests{$test}->{unlike}->{$test_key}))
+ if ($tests{$test}->{like}->{$test_key}
+ && !defined($tests{$test}->{unlike}->{$test_key}))
{
- if (!ok($output_file =~ $tests{$test}->{regexp}, "$run: should dump $test"))
+ if (!ok($output_file =~ $tests{$test}->{regexp},
+ "$run: should dump $test"))
{
diag("Review $run results in $tempdir");
}
}
else
{
- if (!ok(
- $output_file !~
- $tests{$test}->{regexp},
+ if (!ok($output_file !~ $tests{$test}->{regexp},
"$run: should not dump $test"))
{
diag("Review $run results in $tempdir");
my $dir_stat = stat($self->data_dir);
defined($dir_stat)
- or die('unable to stat ' . $self->data_dir);
+ or die('unable to stat ' . $self->data_dir);
return (S_IMODE($dir_stat->mode) == 0750);
}
}
close $conf;
- chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf")
- or die("unable to set permissions for $pgdata/postgresql.conf");
+ chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf")
+ or die("unable to set permissions for $pgdata/postgresql.conf");
$self->set_replication_conf if $params{allows_streaming};
$self->enable_archiving if $params{has_archiving};
TestLib::append_to_file($conffile, $str . "\n");
- chmod($self->group_access() ? 0640 : 0600, $conffile)
- or die("unable to set permissions for $conffile");
+ chmod($self->group_access() ? 0640 : 0600, $conffile)
+ or die("unable to set permissions for $conffile");
}
=pod
}
else
{
- $lsn_expr = 'pg_current_wal_lsn()'
+ $lsn_expr = 'pg_current_wal_lsn()';
}
print "Waiting for replication conn "
. $standby_name . "'s "
sub pg_recvlogical_upto
{
- my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) =
- @_;
+ my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options)
+ = @_;
my ($stdout, $stderr);
my $timeout_exception = 'pg_recvlogical timed out';
{
croak "if specified, filterfn must be a subroutine reference"
unless defined(ref $params{filterfn})
- and (ref $params{filterfn} eq 'CODE');
+ and (ref $params{filterfn} eq 'CODE');
$filterfn = $params{filterfn};
}
# not under msys, return the input argument unchanged.
sub real_dir
{
- my $dir = "$_[0]";
- return $dir unless -d $dir;
- return $dir unless $Config{osname} eq 'msys';
- my $here = cwd;
- chdir $dir;
+ my $dir = "$_[0]";
+ return $dir unless -d $dir;
+ return $dir unless $Config{osname} eq 'msys';
+ my $here = cwd;
+ chdir $dir;
+
# this odd way of calling 'pwd -W' is the only way that seems to work.
- $dir = qx{sh -c "pwd -W"};
- chomp $dir;
- chdir $here;
- return $dir;
+ $dir = qx{sh -c "pwd -W"};
+ chomp $dir;
+ chdir $here;
+ return $dir;
}
sub system_log
# Result defaults to true
my $result = 1;
- find
- (
- {follow_fast => 1,
- wanted =>
- sub
- {
+ find(
+ { follow_fast => 1,
+ wanted => sub {
my $file_stat = stat($File::Find::name);
# Is file in the ignore list?
}
defined($file_stat)
- or die("unable to stat $File::Find::name");
+ or die("unable to stat $File::Find::name");
my $file_mode = S_IMODE($file_stat->mode);
{
if ($file_mode != $expected_file_mode)
{
- print(*STDERR,
+ print(
+ *STDERR,
sprintf("$File::Find::name mode must be %04o\n",
- $expected_file_mode));
+ $expected_file_mode));
$result = 0;
return;
}
}
+
# Else a directory?
elsif (S_ISDIR($file_stat->mode))
{
if ($file_mode != $expected_dir_mode)
{
- print(*STDERR,
+ print(
+ *STDERR,
sprintf("$File::Find::name mode must be %04o\n",
- $expected_dir_mode));
+ $expected_dir_mode));
$result = 0;
return;
}
}
+
# Else something we can't handle
else
{
die "unknown file type for $File::Find::name";
}
- }},
- $dir
- );
+ }
+ },
+ $dir);
return $result;
}
{
my ($dir, $dir_mode, $file_mode) = @_;
- find
- (
- {follow_fast => 1,
- wanted =>
- sub
- {
+ find(
+ { follow_fast => 1,
+ wanted => sub {
my $file_stat = stat($File::Find::name);
if (defined($file_stat))
{
- chmod(S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode,
- $File::Find::name)
- or die "unable to chmod $File::Find::name";
+ chmod(
+ S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode,
+ $File::Find::name
+ ) or die "unable to chmod $File::Find::name";
}
- }},
- $dir
- );
+ }
+ },
+ $dir);
}
# Check presence of a given regexp within pg_config.h for the installation
chomp($stdout);
open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!";
- my $match = (grep {/^$regexp/} <$pg_config_h>);
+ my $match = (grep { /^$regexp/ } <$pg_config_h>);
close $pg_config_h;
return $match;
}
$node_master->poll_query_until('postgres',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'test_slot' AND active_pid IS NULL)"
-)
- or die "slot never became inactive";
+) or die "slot never became inactive";
$stdout_recv = $node_master->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 10,
# Ensure that last transaction is replayed on standby.
my $cur_master_lsn =
- $cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
+ $cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $caughtup_query =
- "SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
+ "SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$cur_standby->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby to catch up";
$node->start();
# by default PostgresNode doesn't doesn't restart after a crash
-$node->safe_psql('postgres',
- q[ALTER SYSTEM SET restart_after_crash = 1;
+$node->safe_psql(
+ 'postgres',
+ q[ALTER SYSTEM SET restart_after_crash = 1;
ALTER SYSTEM SET log_connections = 1;
SELECT pg_reload_conf();]);
SELECT pg_backend_pid();
];
ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
- 'acquired pid for SIGQUIT');
+ 'acquired pid for SIGQUIT');
my $pid = $killme_stdout;
chomp($pid);
$killme_stdout = '';
INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status;
];
ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigquit/m),
- 'inserted in-progress-before-sigquit');
+ 'inserted in-progress-before-sigquit');
$killme_stdout = '';
$killme_stderr = '';
SELECT pg_sleep(3600);
];
ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m),
- 'monitor connected');
+ 'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
$killme_stdin .= q[
SELECT 1;
];
-ok(pump_until($killme, \$killme_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m),
- "psql query died successfully after SIGQUIT");
+ok( pump_until(
+ $killme,
+ \$killme_stderr,
+qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m
+ ),
+ "psql query died successfully after SIGQUIT");
$killme_stderr = '';
$killme_stdout = '';
$killme->finish;
# Wait till server restarts - we should get the WARNING here, but
# sometimes the server is unable to send that, if interrupted while
# sending.
-ok(pump_until($monitor, \$monitor_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m),
- "psql monitor died successfully after SIGQUIT");
+ok( pump_until(
+ $monitor,
+ \$monitor_stderr,
+qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m
+ ),
+ "psql monitor died successfully after SIGQUIT");
$monitor->finish;
# Wait till server restarts
-is($node->poll_query_until('postgres', 'SELECT $$restarted after sigquit$$;', 'restarted after sigquit'),
- "1", "reconnected after SIGQUIT");
+is( $node->poll_query_until(
+ 'postgres',
+ 'SELECT $$restarted after sigquit$$;',
+ 'restarted after sigquit'),
+ "1",
+ "reconnected after SIGQUIT");
# restart psql processes, now that the crash cycle finished
SELECT pg_backend_pid();
];
ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
- "acquired pid for SIGKILL");
+ "acquired pid for SIGKILL");
$pid = $killme_stdout;
chomp($pid);
-$pid = $killme_stdout;
+$pid = $killme_stdout;
$killme_stdout = '';
$killme_stderr = '';
INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status;
];
ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigkill/m),
- 'inserted in-progress-before-sigkill');
+ 'inserted in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
SELECT pg_sleep(3600);
];
ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m),
- 'monitor connected');
+ 'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
$killme_stdin .= q[
SELECT 1;
];
-ok(pump_until($killme, \$killme_stderr, qr/server closed the connection unexpectedly/m),
- "psql query died successfully after SIGKILL");
+ok( pump_until(
+ $killme, \$killme_stderr,
+ qr/server closed the connection unexpectedly/m),
+ "psql query died successfully after SIGKILL");
$killme->finish;
# Wait till server restarts - we should get the WARNING here, but
# sometimes the server is unable to send that, if interrupted while
# sending.
-ok(pump_until($monitor, \$monitor_stderr, qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m),
- "psql monitor died successfully after SIGKILL");
+ok( pump_until(
+ $monitor,
+ \$monitor_stderr,
+qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m
+ ),
+ "psql monitor died successfully after SIGKILL");
$monitor->finish;
# Wait till server restarts
-is($node->poll_query_until('postgres', 'SELECT 1', '1'), "1", "reconnected after SIGKILL");
+is($node->poll_query_until('postgres', 'SELECT 1', '1'),
+ "1", "reconnected after SIGKILL");
# Make sure the committed rows survived, in-progress ones not
-is($node->safe_psql('postgres', 'SELECT * FROM alive'),
- "committed-before-sigquit\ncommitted-before-sigkill", 'data survived');
+is( $node->safe_psql('postgres', 'SELECT * FROM alive'),
+ "committed-before-sigquit\ncommitted-before-sigkill",
+ 'data survived');
-is($node->safe_psql('postgres', 'INSERT INTO alive VALUES($$before-orderly-restart$$) RETURNING status'),
- 'before-orderly-restart', 'can still write after crash restart');
+is( $node->safe_psql(
+ 'postgres',
+'INSERT INTO alive VALUES($$before-orderly-restart$$) RETURNING status'),
+ 'before-orderly-restart',
+ 'can still write after crash restart');
# Just to be sure, check that an orderly restart now still works
$node->restart();
-is($node->safe_psql('postgres', 'SELECT * FROM alive'),
- "committed-before-sigquit\ncommitted-before-sigkill\nbefore-orderly-restart", 'data survived');
+is( $node->safe_psql('postgres', 'SELECT * FROM alive'),
+"committed-before-sigquit\ncommitted-before-sigkill\nbefore-orderly-restart",
+ 'data survived');
-is($node->safe_psql('postgres', 'INSERT INTO alive VALUES($$after-orderly-restart$$) RETURNING status'),
- 'after-orderly-restart', 'can still write after orderly restart');
+is( $node->safe_psql(
+ 'postgres',
+ 'INSERT INTO alive VALUES($$after-orderly-restart$$) RETURNING status'
+ ),
+ 'after-orderly-restart',
+ 'can still write after orderly restart');
$node->stop();
if ($psql_timeout->is_expired)
{
diag("aborting wait: program timed out");
- diag("stream contents: >>", $$stream,"<<");
+ diag("stream contents: >>", $$stream, "<<");
diag("pattern searched for: ", $untl);
return 0;
if (not $proc->pumpable())
{
diag("aborting wait: program died");
- diag("stream contents: >>", $$stream,"<<");
+ diag("stream contents: >>", $$stream, "<<");
diag("pattern searched for: ", $untl);
return 0;
}
return 1;
-};
+}
my $realTSDir = TestLib::real_dir($tablespaceDir);
-$node->safe_psql('postgres',
- "CREATE TABLESPACE ts1 LOCATION '$realTSDir'");
+$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$realTSDir'");
$node->safe_psql('postgres',
'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1');
$node->start;
# check unlogged table in base
-ok(-f "$pgdata/${baseUnloggedPath}_init",
- 'init fork in base still exists');
-ok(-f "$pgdata/$baseUnloggedPath",
- 'main fork in base recreated at startup');
-ok( !-f "$pgdata/${baseUnloggedPath}_vm",
+ok(-f "$pgdata/${baseUnloggedPath}_init", 'init fork in base still exists');
+ok(-f "$pgdata/$baseUnloggedPath", 'main fork in base recreated at startup');
+ok(!-f "$pgdata/${baseUnloggedPath}_vm",
'vm fork in base removed at startup');
ok( !-f "$pgdata/${baseUnloggedPath}_fsm",
'fsm fork in base removed at startup');
my ($common_connstr, $connstr, $test_name) = @_;
my $cmd = [
- 'psql', '-X', '-A', '-t', '-c', "SELECT \$\$connected with $connstr\$\$",
+ 'psql', '-X', '-A', '-t', '-c',
+ "SELECT \$\$connected with $connstr\$\$",
'-d', "$common_connstr $connstr" ];
command_ok($cmd, $test_name);
my ($common_connstr, $connstr, $expected_stderr, $test_name) = @_;
my $cmd = [
- 'psql', '-X', '-A', '-t', '-c', "SELECT \$\$connected with $connstr\$\$",
+ 'psql', '-X', '-A', '-t', '-c',
+ "SELECT \$\$connected with $connstr\$\$",
'-d', "$common_connstr $connstr" ];
command_fails_like($cmd, $expected_stderr, $test_name);
if (defined($password))
{
$node->psql('postgres',
-"SET password_encryption='$password_enc'; ALTER USER ssltestuser PASSWORD '$password';");
+"SET password_encryption='$password_enc'; ALTER USER ssltestuser PASSWORD '$password';"
+ );
$node->psql('postgres',
-"SET password_encryption='$password_enc'; ALTER USER anotheruser PASSWORD '$password';");
+"SET password_encryption='$password_enc'; ALTER USER anotheruser PASSWORD '$password';"
+ );
}
# enable logging etc.
sub configure_hba_for_ssl
{
my ($node, $serverhost, $authmethod) = @_;
- my $pgdata = $node->data_dir;
+ my $pgdata = $node->data_dir;
# Only accept SSL connections from localhost. Our tests don't depend on this
# but seems best to keep it as narrow as possible for security reasons.
note "testing password-protected keys";
-open my $sslconf, '>', $node->data_dir."/sslconfig.conf";
+open my $sslconf, '>', $node->data_dir . "/sslconfig.conf";
print $sslconf "ssl=on\n";
print $sslconf "ssl_cert_file='server-cn-only.crt'\n";
print $sslconf "ssl_key_file='server-password.key'\n";
print $sslconf "ssl_passphrase_command='echo wrongpassword'\n";
close $sslconf;
-command_fails(['pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart'],
- 'restart fails with password-protected key file with wrong password');
+command_fails(
+ [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
+ 'restart fails with password-protected key file with wrong password');
$node->_update_pid(0);
-open $sslconf, '>', $node->data_dir."/sslconfig.conf";
+open $sslconf, '>', $node->data_dir . "/sslconfig.conf";
print $sslconf "ssl=on\n";
print $sslconf "ssl_cert_file='server-cn-only.crt'\n";
print $sslconf "ssl_key_file='server-password.key'\n";
print $sslconf "ssl_passphrase_command='echo secret1'\n";
close $sslconf;
-command_ok(['pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart'],
- 'restart succeeds with password-protected key file');
+command_ok(
+ [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
+ 'restart succeeds with password-protected key file');
$node->_update_pid(1);
### Run client-side tests.
"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
# The server should not accept non-SSL connections.
-test_connect_fails($common_connstr, "sslmode=disable",
- qr/\Qno pg_hba.conf entry\E/,
- "server doesn't accept non-SSL connections");
+test_connect_fails(
+ $common_connstr, "sslmode=disable",
+ qr/\Qno pg_hba.conf entry\E/,
+ "server doesn't accept non-SSL connections");
# Try without a root cert. In sslmode=require, this should work. In verify-ca
# or verify-full mode it should fail.
-test_connect_ok($common_connstr, "sslrootcert=invalid sslmode=require",
- "connect without server root cert sslmode=require");
-test_connect_fails($common_connstr, "sslrootcert=invalid sslmode=verify-ca",
- qr/root certificate file "invalid" does not exist/,
- "connect without server root cert sslmode=verify-ca");
-test_connect_fails($common_connstr, "sslrootcert=invalid sslmode=verify-full",
- qr/root certificate file "invalid" does not exist/,
- "connect without server root cert sslmode=verify-full");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=invalid sslmode=require",
+ "connect without server root cert sslmode=require");
+test_connect_fails(
+ $common_connstr,
+ "sslrootcert=invalid sslmode=verify-ca",
+ qr/root certificate file "invalid" does not exist/,
+ "connect without server root cert sslmode=verify-ca");
+test_connect_fails(
+ $common_connstr,
+ "sslrootcert=invalid sslmode=verify-full",
+ qr/root certificate file "invalid" does not exist/,
+ "connect without server root cert sslmode=verify-full");
# Try with wrong root cert, should fail. (We're using the client CA as the
# root, but the server's key is signed by the server CA.)
test_connect_fails($common_connstr,
- "sslrootcert=ssl/client_ca.crt sslmode=require",
- qr/SSL error/,
- "connect with wrong server root cert sslmode=require");
+ "sslrootcert=ssl/client_ca.crt sslmode=require",
+ qr/SSL error/, "connect with wrong server root cert sslmode=require");
test_connect_fails($common_connstr,
- "sslrootcert=ssl/client_ca.crt sslmode=verify-ca",
- qr/SSL error/,
- "connect with wrong server root cert sslmode=verify-ca");
+ "sslrootcert=ssl/client_ca.crt sslmode=verify-ca",
+ qr/SSL error/, "connect with wrong server root cert sslmode=verify-ca");
test_connect_fails($common_connstr,
- "sslrootcert=ssl/client_ca.crt sslmode=verify-full",
- qr/SSL error/,
- "connect with wrong server root cert sslmode=verify-full");
+ "sslrootcert=ssl/client_ca.crt sslmode=verify-full",
+ qr/SSL error/, "connect with wrong server root cert sslmode=verify-full");
# Try with just the server CA's cert. This fails because the root file
# must contain the whole chain up to the root CA.
test_connect_fails($common_connstr,
- "sslrootcert=ssl/server_ca.crt sslmode=verify-ca",
- qr/SSL error/,
- "connect with server CA cert, without root CA");
+ "sslrootcert=ssl/server_ca.crt sslmode=verify-ca",
+ qr/SSL error/, "connect with server CA cert, without root CA");
# And finally, with the correct root cert.
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=require",
- "connect with correct server CA cert file sslmode=require");
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca",
- "connect with correct server CA cert file sslmode=verify-ca");
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-full",
- "connect with correct server CA cert file sslmode=verify-full");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/root+server_ca.crt sslmode=require",
+ "connect with correct server CA cert file sslmode=require");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca",
+ "connect with correct server CA cert file sslmode=verify-ca");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/root+server_ca.crt sslmode=verify-full",
+ "connect with correct server CA cert file sslmode=verify-full");
# Test with cert root file that contains two certificates. The client should
# be able to pick the right one, regardless of the order in the file.
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca",
- "cert root file that contains two certificates, order 1");
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca",
- "cert root file that contains two certificates, order 2");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca",
+ "cert root file that contains two certificates, order 1");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca",
+ "cert root file that contains two certificates, order 2");
# CRL tests
# Invalid CRL filename is the same as no CRL, succeeds
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid",
- "sslcrl option with invalid file name");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid",
+ "sslcrl option with invalid file name");
# A CRL belonging to a different CA is not accepted, fails
-test_connect_fails($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl",
- qr/SSL error/,
- "CRL belonging to a different CA");
+test_connect_fails(
+ $common_connstr,
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl",
+ qr/SSL error/,
+ "CRL belonging to a different CA");
# With the correct CRL, succeeds (this cert is not revoked)
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl",
- "CRL with a non-revoked cert");
+test_connect_ok(
+ $common_connstr,
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl",
+ "CRL with a non-revoked cert");
# Check that connecting with verify-full fails, when the hostname doesn't
# match the hostname in the server's certificate.
$common_connstr =
"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
-test_connect_ok($common_connstr, "sslmode=require host=wronghost.test",
- "mismatch between host name and server certificate sslmode=require");
-test_connect_ok($common_connstr, "sslmode=verify-ca host=wronghost.test",
- "mismatch between host name and server certificate sslmode=verify-ca");
-test_connect_fails($common_connstr, "sslmode=verify-full host=wronghost.test",
- qr/\Qserver certificate for "common-name.pg-ssltest.test" does not match host name "wronghost.test"\E/,
- "mismatch between host name and server certificate sslmode=verify-full");
+test_connect_ok(
+ $common_connstr,
+ "sslmode=require host=wronghost.test",
+ "mismatch between host name and server certificate sslmode=require");
+test_connect_ok(
+ $common_connstr,
+ "sslmode=verify-ca host=wronghost.test",
+ "mismatch between host name and server certificate sslmode=verify-ca");
+test_connect_fails(
+ $common_connstr,
+ "sslmode=verify-full host=wronghost.test",
+qr/\Qserver certificate for "common-name.pg-ssltest.test" does not match host name "wronghost.test"\E/,
+ "mismatch between host name and server certificate sslmode=verify-full");
# Test Subject Alternative Names.
switch_server_cert($node, 'server-multiple-alt-names');
$common_connstr =
"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok($common_connstr, "host=dns1.alt-name.pg-ssltest.test",
- "host name matching with X.509 Subject Alternative Names 1");
-test_connect_ok($common_connstr, "host=dns2.alt-name.pg-ssltest.test",
- "host name matching with X.509 Subject Alternative Names 2");
-test_connect_ok($common_connstr, "host=foo.wildcard.pg-ssltest.test",
- "host name matching with X.509 Subject Alternative Names wildcard");
-
-test_connect_fails($common_connstr, "host=wronghost.alt-name.pg-ssltest.test",
- qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "wronghost.alt-name.pg-ssltest.test"\E/,
- "host name not matching with X.509 Subject Alternative Names");
-test_connect_fails($common_connstr,
- "host=deep.subdomain.wildcard.pg-ssltest.test",
- qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/,
- "host name not matching with X.509 Subject Alternative Names wildcard");
+test_connect_ok(
+ $common_connstr,
+ "host=dns1.alt-name.pg-ssltest.test",
+ "host name matching with X.509 Subject Alternative Names 1");
+test_connect_ok(
+ $common_connstr,
+ "host=dns2.alt-name.pg-ssltest.test",
+ "host name matching with X.509 Subject Alternative Names 2");
+test_connect_ok(
+ $common_connstr,
+ "host=foo.wildcard.pg-ssltest.test",
+ "host name matching with X.509 Subject Alternative Names wildcard");
+
+test_connect_fails(
+ $common_connstr,
+ "host=wronghost.alt-name.pg-ssltest.test",
+qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "wronghost.alt-name.pg-ssltest.test"\E/,
+ "host name not matching with X.509 Subject Alternative Names");
+test_connect_fails(
+ $common_connstr,
+ "host=deep.subdomain.wildcard.pg-ssltest.test",
+qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/,
+ "host name not matching with X.509 Subject Alternative Names wildcard");
# Test certificate with a single Subject Alternative Name. (this gives a
# slightly different error message, that's all)
$common_connstr =
"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok($common_connstr, "host=single.alt-name.pg-ssltest.test",
- "host name matching with a single X.509 Subject Alternative Name");
-
-test_connect_fails($common_connstr, "host=wronghost.alt-name.pg-ssltest.test",
- qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "wronghost.alt-name.pg-ssltest.test"\E/,
- "host name not matching with a single X.509 Subject Alternative Name");
-test_connect_fails($common_connstr,
- "host=deep.subdomain.wildcard.pg-ssltest.test",
- qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/,
- "host name not matching with a single X.509 Subject Alternative Name wildcard");
+test_connect_ok(
+ $common_connstr,
+ "host=single.alt-name.pg-ssltest.test",
+ "host name matching with a single X.509 Subject Alternative Name");
+
+test_connect_fails(
+ $common_connstr,
+ "host=wronghost.alt-name.pg-ssltest.test",
+qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "wronghost.alt-name.pg-ssltest.test"\E/,
+ "host name not matching with a single X.509 Subject Alternative Name");
+test_connect_fails(
+ $common_connstr,
+ "host=deep.subdomain.wildcard.pg-ssltest.test",
+qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/,
+"host name not matching with a single X.509 Subject Alternative Name wildcard"
+);
# Test server certificate with a CN and SANs. Per RFCs 2818 and 6125, the CN
# should be ignored when the certificate has both.
$common_connstr =
"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok($common_connstr, "host=dns1.alt-name.pg-ssltest.test",
- "certificate with both a CN and SANs 1");
-test_connect_ok($common_connstr, "host=dns2.alt-name.pg-ssltest.test",
- "certificate with both a CN and SANs 2");
-test_connect_fails($common_connstr, "host=common-name.pg-ssltest.test",
- qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 1 other name) does not match host name "common-name.pg-ssltest.test"\E/,
- "certificate with both a CN and SANs ignores CN");
+test_connect_ok(
+ $common_connstr,
+ "host=dns1.alt-name.pg-ssltest.test",
+ "certificate with both a CN and SANs 1");
+test_connect_ok(
+ $common_connstr,
+ "host=dns2.alt-name.pg-ssltest.test",
+ "certificate with both a CN and SANs 2");
+test_connect_fails(
+ $common_connstr,
+ "host=common-name.pg-ssltest.test",
+qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 1 other name) does not match host name "common-name.pg-ssltest.test"\E/,
+ "certificate with both a CN and SANs ignores CN");
# Finally, test a server certificate that has no CN or SANs. Of course, that's
# not a very sensible certificate, but libpq should handle it gracefully.
$common_connstr =
"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
-test_connect_ok($common_connstr,
- "sslmode=verify-ca host=common-name.pg-ssltest.test",
- "server certificate without CN or SANs sslmode=verify-ca");
-test_connect_fails($common_connstr,
- "sslmode=verify-full host=common-name.pg-ssltest.test",
- qr/could not get server's host name from server certificate/,
- "server certificate without CN or SANs sslmode=verify-full");
+test_connect_ok(
+ $common_connstr,
+ "sslmode=verify-ca host=common-name.pg-ssltest.test",
+ "server certificate without CN or SANs sslmode=verify-ca");
+test_connect_fails(
+ $common_connstr,
+ "sslmode=verify-full host=common-name.pg-ssltest.test",
+ qr/could not get server's host name from server certificate/,
+ "server certificate without CN or SANs sslmode=verify-full");
# Test that the CRL works
switch_server_cert($node, 'server-revoked');
"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
# Without the CRL, succeeds. With it, fails.
-test_connect_ok($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca",
- "connects without client-side CRL");
-test_connect_fails($common_connstr,
- "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl",
- qr/SSL error/,
- "does not connect with client-side CRL");
+test_connect_ok(
+ $common_connstr,
+ "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca",
+ "connects without client-side CRL");
+test_connect_fails(
+ $common_connstr,
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl",
+ qr/SSL error/,
+ "does not connect with client-side CRL");
### Server-side tests.
###
"sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR";
# no client cert
-test_connect_fails($common_connstr,
- "user=ssltestuser sslcert=invalid",
- qr/connection requires a valid client certificate/,
- "certificate authorization fails without client cert");
+test_connect_fails(
+ $common_connstr,
+ "user=ssltestuser sslcert=invalid",
+ qr/connection requires a valid client certificate/,
+ "certificate authorization fails without client cert");
# correct client cert
-test_connect_ok($common_connstr,
- "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
- "certificate authorization succeeds with correct client cert");
+test_connect_ok(
+ $common_connstr,
+ "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
+ "certificate authorization succeeds with correct client cert");
# client key with wrong permissions
-test_connect_fails($common_connstr,
- "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_wrongperms_tmp.key",
- qr!\Qprivate key file "ssl/client_wrongperms_tmp.key" has group or world access\E!,
- "certificate authorization fails because of file permissions");
+test_connect_fails(
+ $common_connstr,
+"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_wrongperms_tmp.key",
+qr!\Qprivate key file "ssl/client_wrongperms_tmp.key" has group or world access\E!,
+ "certificate authorization fails because of file permissions");
# client cert belonging to another user
-test_connect_fails($common_connstr,
- "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
- qr/certificate authentication failed for user "anotheruser"/,
- "certificate authorization fails with client cert belonging to another user");
+test_connect_fails(
+ $common_connstr,
+ "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
+ qr/certificate authentication failed for user "anotheruser"/,
+"certificate authorization fails with client cert belonging to another user");
# revoked client cert
-test_connect_fails($common_connstr,
- "user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked_tmp.key",
- qr/SSL error/,
- "certificate authorization fails with revoked client cert");
+test_connect_fails(
+ $common_connstr,
+"user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked_tmp.key",
+ qr/SSL error/,
+ "certificate authorization fails with revoked client cert");
# intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file
switch_server_cert($node, 'server-cn-only', 'root_ca');
$common_connstr =
"user=ssltestuser dbname=certdb sslkey=ssl/client_tmp.key sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
-test_connect_ok($common_connstr,
- "sslmode=require sslcert=ssl/client+client_ca.crt",
- "intermediate client certificate is provided by client");
+test_connect_ok(
+ $common_connstr,
+ "sslmode=require sslcert=ssl/client+client_ca.crt",
+ "intermediate client certificate is provided by client");
test_connect_fails($common_connstr, "sslmode=require sslcert=ssl/client.crt",
- qr/SSL error/,
- "intermediate client certificate is missing");
+ qr/SSL error/, "intermediate client certificate is missing");
# clean up
-unlink("ssl/client_tmp.key",
- "ssl/client_wrongperms_tmp.key",
- "ssl/client-revoked_tmp.key");
+unlink("ssl/client_tmp.key", "ssl/client_wrongperms_tmp.key",
+ "ssl/client-revoked_tmp.key");
# Determine whether build supports tls-server-end-point.
my $supports_tls_server_end_point =
- check_pg_config("#define HAVE_X509_GET_SIGNATURE_NID 1");
+ check_pg_config("#define HAVE_X509_GET_SIGNATURE_NID 1");
# Allocation of base connection string shared among multiple tests.
my $common_connstr;
# Configure server for SSL connections, with password handling.
configure_test_server_for_ssl($node, $SERVERHOSTADDR, "scram-sha-256",
- "pass", "scram-sha-256");
+ "pass", "scram-sha-256");
switch_server_cert($node, 'server-cn-only');
$ENV{PGPASSWORD} = "pass";
$common_connstr =
-"user=ssltestuser dbname=trustdb sslmode=require hostaddr=$SERVERHOSTADDR";
+ "user=ssltestuser dbname=trustdb sslmode=require hostaddr=$SERVERHOSTADDR";
# Default settings
test_connect_ok($common_connstr, '',
- "SCRAM authentication with default channel binding");
+ "SCRAM authentication with default channel binding");
# Channel binding settings
-test_connect_ok($common_connstr,
+test_connect_ok(
+ $common_connstr,
"scram_channel_binding=tls-unique",
"SCRAM authentication with tls-unique as channel binding");
-test_connect_ok($common_connstr,
- "scram_channel_binding=''",
- "SCRAM authentication without channel binding");
+test_connect_ok($common_connstr, "scram_channel_binding=''",
+ "SCRAM authentication without channel binding");
if ($supports_tls_server_end_point)
{
- test_connect_ok($common_connstr,
- "scram_channel_binding=tls-server-end-point",
- "SCRAM authentication with tls-server-end-point as channel binding");
+ test_connect_ok(
+ $common_connstr,
+ "scram_channel_binding=tls-server-end-point",
+ "SCRAM authentication with tls-server-end-point as channel binding");
}
else
{
- test_connect_fails($common_connstr,
- "scram_channel_binding=tls-server-end-point",
- qr/channel binding type "tls-server-end-point" is not supported by this build/,
- "SCRAM authentication with tls-server-end-point as channel binding");
+ test_connect_fails(
+ $common_connstr,
+ "scram_channel_binding=tls-server-end-point",
+qr/channel binding type "tls-server-end-point" is not supported by this build/,
+ "SCRAM authentication with tls-server-end-point as channel binding");
$number_of_tests++;
}
-test_connect_fails($common_connstr,
+test_connect_fails(
+ $common_connstr,
"scram_channel_binding=not-exists",
qr/unsupported SCRAM channel-binding type/,
"SCRAM authentication with invalid channel binding");
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_mixed (a, b) VALUES (1, 'foo')");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))");
+"CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))"
+);
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
# replication of the table with included index
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))");
+"CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_include SELECT generate_series(1,50)");
-$node_publisher->safe_psql('postgres', "DELETE FROM tab_include WHERE a > 20");
+$node_publisher->safe_psql('postgres',
+ "DELETE FROM tab_include WHERE a > 20");
$node_publisher->safe_psql('postgres', "UPDATE tab_include SET a = -a");
$node_publisher->wait_for_catchup($appname);
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_include");
-is($result, qq(20|-20|-1), 'check replicated changes with primary key index with included columns');
+is($result, qq(20|-20|-1),
+ 'check replicated changes with primary key index with included columns');
# insert some duplicate rows
$node_publisher->safe_psql('postgres',
# Wait for initial sync to finish as well
my $synced_query =
- "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');";
+"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Wait for initial sync to finish as well
my $synced_query =
- "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');";
+"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
-$node_publisher->safe_psql('postgres', q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');});
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');});
$node_publisher->wait_for_catchup($appname);
-is($node_subscriber->safe_psql('postgres', q{SELECT a, b FROM test1}),
- qq(1|one
+is( $node_subscriber->safe_psql('postgres', q{SELECT a, b FROM test1}),
+ qq(1|one
2|two),
- 'initial data replicated to subscriber');
+ 'initial data replicated to subscriber');
# DDL that causes a heap rewrite
my $ddl2 = "ALTER TABLE test1 ADD c int NOT NULL DEFAULT 0;";
$node_publisher->wait_for_catchup($appname);
-$node_publisher->safe_psql('postgres', q{INSERT INTO test1 (a, b, c) VALUES (3, 'three', 33);});
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 (a, b, c) VALUES (3, 'three', 33);});
$node_publisher->wait_for_catchup($appname);
-is($node_subscriber->safe_psql('postgres', q{SELECT a, b, c FROM test1}),
- qq(1|one|0
+is( $node_subscriber->safe_psql('postgres', q{SELECT a, b, c FROM test1}),
+ qq(1|one|0
2|two|0
3|three|33),
- 'data replicated to subscriber');
+ 'data replicated to subscriber');
$node_subscriber->stop;
$node_publisher->stop;
$node_publisher->wait_for_catchup($appname);
-$node_subscriber->safe_psql('postgres', q{
+$node_subscriber->safe_psql(
+ 'postgres', q{
BEGIN;
ALTER SUBSCRIPTION mysub DISABLE;
ALTER SUBSCRIPTION mysub SET (slot_name = NONE);
"INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999, e int GENERATED BY DEFAULT AS IDENTITY)");
+$node_subscriber->safe_psql('postgres',
+"CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999, e int GENERATED BY DEFAULT AS IDENTITY)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
or die "Timed out while waiting for subscriber to synchronize data";
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# Update the rows on the publisher and check the additional columns on
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab");
-is($result, qq(2|2|2|2), 'check extra columns contain local defaults after copy');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab");
+is($result, qq(2|2|2|2),
+ 'check extra columns contain local defaults after copy');
# Change the local values of the extra columns on the subscriber,
# update publisher, and check that subscriber retains the expected
# values
-$node_subscriber->safe_psql('postgres', "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'");
-$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = md5(a::text)");
+$node_subscriber->safe_psql('postgres',
+ "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'"
+);
+$node_publisher->safe_psql('postgres',
+ "UPDATE test_tab SET b = md5(a::text)");
$node_publisher->wait_for_catchup($appname);
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab");
+$result = $node_subscriber->safe_psql('postgres',
+"SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab"
+);
is($result, qq(2|2|2), 'check extra columns contain locally changed data');
# Another insert
$node_publisher->wait_for_catchup($appname);
$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab");
-is($result, qq(3|3|3|3), 'check extra columns contain local defaults after apply');
+ $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab");
+is($result, qq(3|3|3|3),
+ 'check extra columns contain local defaults after apply');
$node_subscriber->stop;
$node_publisher->stop;
"CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;"
);
-$node_publisher->safe_psql('postgres', q{CREATE TABLE test1 (a int PRIMARY KEY, b text)});
-$node_publisher->safe_psql('postgres', q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');});
+$node_publisher->safe_psql('postgres',
+ q{CREATE TABLE test1 (a int PRIMARY KEY, b text)});
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');});
-$node_subscriber->safe_psql('postgres', q{CREATE TABLE test1 (a int PRIMARY KEY, b text);});
+$node_subscriber->safe_psql('postgres',
+ q{CREATE TABLE test1 (a int PRIMARY KEY, b text);});
$node_publisher->wait_for_catchup($appname);
# need to make sure they are properly ignored. (bug #15044)
# create a MV with some data
-$node_publisher->safe_psql('postgres', q{CREATE MATERIALIZED VIEW testmv1 AS SELECT * FROM test1;});
+$node_publisher->safe_psql('postgres',
+ q{CREATE MATERIALIZED VIEW testmv1 AS SELECT * FROM test1;});
$node_publisher->wait_for_catchup($appname);
+
# There is no equivalent relation on the subscriber, but MV data is
# not replicated, so this does not hang.
"CREATE TABLE tab4 (x int PRIMARY KEY, y int REFERENCES tab3)");
$node_subscriber->safe_psql('postgres',
- "CREATE SEQUENCE seq1 OWNED BY tab1.a"
-);
-$node_subscriber->safe_psql('postgres',
- "ALTER SEQUENCE seq1 START 101"
-);
+ "CREATE SEQUENCE seq1 OWNED BY tab1.a");
+$node_subscriber->safe_psql('postgres', "ALTER SEQUENCE seq1 START 101");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION pub1 FOR TABLE tab1");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION pub3 FOR TABLE tab3, tab4");
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr application_name=sub1' PUBLICATION pub1");
+"CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr application_name=sub1' PUBLICATION pub1"
+);
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr application_name=sub2' PUBLICATION pub2");
+"CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr application_name=sub2' PUBLICATION pub2"
+);
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION sub3 CONNECTION '$publisher_connstr application_name=sub3' PUBLICATION pub3");
+"CREATE SUBSCRIPTION sub3 CONNECTION '$publisher_connstr application_name=sub3' PUBLICATION pub3"
+);
# Wait for initial sync of all subscriptions
my $synced_query =
# insert data to truncate
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");
$node_publisher->wait_for_catchup('sub1');
my $result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab1");
-is($result, qq(0||),
- 'truncate replicated');
+is($result, qq(0||), 'truncate replicated');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT nextval('seq1')");
-is($result, qq(1),
- 'sequence not restarted');
+$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')");
+is($result, qq(1), 'sequence not restarted');
# truncate with restart identity
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT nextval('seq1')");
-is($result, qq(101),
- 'truncate restarted identities');
+$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')");
+is($result, qq(101), 'truncate restarted identities');
# test publication that does not replicate truncate
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab2 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (3)");
$node_publisher->safe_psql('postgres', "TRUNCATE tab2");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab2");
-is($result, qq(3|1|3),
- 'truncate not replicated');
+is($result, qq(3|1|3), 'truncate not replicated');
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION pub2 SET (publish = 'insert, truncate')");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab2");
-is($result, qq(0||),
- 'truncate replicated after publication change');
+is($result, qq(0||), 'truncate replicated after publication change');
# test multiple tables connected by foreign keys
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab3 VALUES (1), (2), (3)");
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab4 VALUES (11, 1), (111, 1), (22, 2)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab3 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab4 VALUES (11, 1), (111, 1), (22, 2)");
$node_publisher->safe_psql('postgres', "TRUNCATE tab3, tab4");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab3");
-is($result, qq(0||),
- 'truncate of multiple tables replicated');
+is($result, qq(0||), 'truncate of multiple tables replicated');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(x), max(x) FROM tab4");
-is($result, qq(0||),
- 'truncate of multiple tables replicated');
+is($result, qq(0||), 'truncate of multiple tables replicated');
# test truncate of multiple tables, some of which are not published
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION sub2");
$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub2");
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (2), (3)");
-$node_subscriber->safe_psql('postgres', "INSERT INTO tab2 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab1 VALUES (1), (2), (3)");
+$node_subscriber->safe_psql('postgres',
+ "INSERT INTO tab2 VALUES (1), (2), (3)");
$node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab1");
-is($result, qq(0||),
- 'truncate of multiple tables some not published');
+is($result, qq(0||), 'truncate of multiple tables some not published');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab2");
-is($result, qq(3|1|3),
- 'truncate of multiple tables some not published');
+is($result, qq(3|1|3), 'truncate of multiple tables some not published');
{
my $commit = $1;
my $tag = $2;
- if ($tag =~ /^REL_\d+_\d+$/
- || $tag =~ /^REL\d+_\d+$/
- || $tag =~ /^REL\d+_\d+_\d+$/)
+ if ( $tag =~ /^REL_\d+_\d+$/
+ || $tag =~ /^REL\d+_\d+$/
+ || $tag =~ /^REL\d+_\d+_\d+$/)
{
$rel_tags{$commit} = $tag;
}
# Don't find files of in-tree temporary installations.
$_ eq 'share' and $File::Find::prune = 1;
- }
+ }
},
@top_dir);
CopySetOfFiles('config files', $sample_files, $target . '/share/');
'Information schema data', $target . '/share/',
'src/backend/catalog/', 'sql_features.txt');
CopyFiles(
- 'Error code data', $target . '/share/',
- 'src/backend/utils/', 'errcodes.txt');
+ 'Error code data', $target . '/share/',
+ 'src/backend/utils/', 'errcodes.txt');
GenerateConversionScript($target);
GenerateTimezoneFiles($target, $conf);
GenerateTsearchFiles($target);
# Don't find files of in-tree temporary installations.
$_ eq 'share' and $File::Find::prune = 1;
- }
+ }
},
@pldirs);
CopySetOfFiles('PL Extension files',
{ wanted => sub {
/^nls\.mk\z/s
&& !push(@flist, $File::Find::name);
- }
+ }
},
"src");
foreach (@flist)
my $perl_path = $solution->{options}->{perl} . '\lib\CORE\*perl*';
- # ActivePerl 5.16 provided perl516.lib; 5.18 provided libperl518.a
- # Starting with ActivePerl 5.24, both perlnn.lib and libperlnn.a are provided.
- # In this case, prefer .lib.
+# ActivePerl 5.16 provided perl516.lib; 5.18 provided libperl518.a
+# Starting with ActivePerl 5.24, both perlnn.lib and libperlnn.a are provided.
+# In this case, prefer .lib.
my @perl_libs =
- reverse sort grep { /perl\d+\.lib$|libperl\d+\.a$/ } glob($perl_path);
+ reverse sort grep { /perl\d+\.lib$|libperl\d+\.a$/ }
+ glob($perl_path);
if (@perl_libs > 0)
{
$plperl->AddLibrary($perl_libs[0]);
'hstore', 'contrib/hstore');
my $jsonb_plperl = AddTransformModule(
'jsonb_plperl', 'contrib/jsonb_plperl',
- 'plperl', 'src/pl/plperl');
+ 'plperl', 'src/pl/plperl');
foreach my $f (@perl_embed_ccflags)
{
# Add a simple transform module
sub AddTransformModule
{
- my $n = shift;
- my $n_src = shift;
- my $pl_proj_name = shift;
- my $pl_src = shift;
- my $type_name = shift;
- my $type_src = shift;
+ my $n = shift;
+ my $n_src = shift;
+ my $pl_proj_name = shift;
+ my $pl_src = shift;
+ my $type_name = shift;
+ my $type_src = shift;
my $type_proj = undef;
if ($type_name)
print "Building $out from $in (contrib/$n)...\n";
my $cont = Project::read_file("contrib/$n/$in");
my $dn = $out;
- $dn =~ s/\.sql$//;
+ $dn =~ s/\.sql$//;
$cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
my $o;
open($o, '>', "contrib/$n/$out")
{
next
if $subdir eq "\$(top_builddir)/src/timezone"
- ; #special case for non-standard include
+ ; #special case for non-standard include
next
if $reldir . "/" . $subdir eq "src/backend/port/darwin";
unless $options->{wal_blocksize}; # undef or 0 means default
die "Bad wal_blocksize $options->{wal_blocksize}"
unless grep { $_ == $options->{wal_blocksize} }
- (1, 2, 4, 8, 16, 32, 64);
+ (1, 2, 4, 8, 16, 32, 64);
$options->{wal_segsize} = 16
unless $options->{wal_segsize}; # undef or 0 means default
die "Bad wal_segsize $options->{wal_segsize}"
"\n";
print $o "#define RELSEG_SIZE ",
(1024 / $self->{options}->{blocksize}) *
- $self->{options}->{segsize} *
- 1024, "\n";
+ $self->{options}->{segsize} * 1024, "\n";
print $o "#define XLOG_BLCKSZ ",
1024 * $self->{options}->{wal_blocksize}, "\n";
chdir('src/backend/utils');
my $pg_language_dat = '../../../src/include/catalog/pg_language.dat';
- my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
- if (IsNewer(
- 'fmgrtab.c', $pg_language_dat)
- || IsNewer(
- 'fmgrtab.c', $pg_proc_dat)
- || IsNewer(
- 'fmgrtab.c', '../../../src/include/access/transam.h')
- )
+ my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
+ if ( IsNewer('fmgrtab.c', $pg_language_dat)
+ || IsNewer('fmgrtab.c', $pg_proc_dat)
+ || IsNewer('fmgrtab.c', '../../../src/include/access/transam.h'))
{
system(
-"perl -I ../catalog Gen_fmgrtab.pl -I../../../src/include/ $pg_language_dat $pg_proc_dat");
+"perl -I ../catalog Gen_fmgrtab.pl -I../../../src/include/ $pg_language_dat $pg_proc_dat"
+ );
}
chdir('../../..');
$mf =~ /^POSTGRES_BKI_DATA\s*:?=[^,]+,(.*)\)$/gm
|| croak "Could not find POSTGRES_BKI_DATA in Makefile\n";
my @bki_data = split /\s+/, $1;
+
foreach my $bki (@bki_srcs, @bki_data)
{
next if $bki eq "";
{
chdir('src/backend/catalog');
my $bki_srcs = join(' ../../../src/include/catalog/', @bki_srcs);
- system("perl genbki.pl --set-version=$self->{majorver} $bki_srcs");
+ system(
+ "perl genbki.pl --set-version=$self->{majorver} $bki_srcs");
chdir('../../..');
# Copy generated headers to include directory.
opendir(my $dh, 'src/backend/catalog/')
|| die "Can't opendir src/backend/catalog/ $!";
- my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh);
+ my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh);
closedir $dh;
foreach my $def_header (@def_headers)
{
copyFile(
- "src/backend/catalog/$def_header",
- "src/include/catalog/$def_header");
+ "src/backend/catalog/$def_header",
+ "src/include/catalog/$def_header");
}
copyFile(
'src/backend/catalog/schemapg.h',
{
return new VS2015Solution(@_);
}
- # visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
- elsif (($visualStudioVersion ge '14.10') or ($visualStudioVersion eq '15.00'))
+
+# visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
+ elsif (($visualStudioVersion ge '14.10')
+ or ($visualStudioVersion eq '15.00'))
{
return new VS2017Solution(@_);
}
{
return new VC2015Project(@_);
}
- # visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
- elsif (($visualStudioVersion ge '14.10') or ($visualStudioVersion eq '15.00'))
+
+# visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it.
+ elsif (($visualStudioVersion ge '14.10')
+ or ($visualStudioVersion eq '15.00'))
{
return new VC2017Project(@_);
}
sub _GetVisualStudioVersion
{
my ($major, $minor) = @_;
- # visual 2017 hasn't changed the nmake version to 15, so still using the older version for comparison.
+
+# visual 2017 hasn't changed the nmake version to 15, so still using the older version for comparison.
if ($major > 14)
{
carp
usage()
unless scalar(@ARGV) == 2
- && ( ($ARGV[0] =~ /\\([^\\]+$)/)
- && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64'));
+ && ( ($ARGV[0] =~ /\\([^\\]+$)/)
+ && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64'));
my $defname = uc $1;
my $deffile = "$ARGV[0]/$defname.def";
my $platform = $ARGV[1];
{
next unless -d "$dir/sql" && -d "$dir/expected";
my $lang;
- if ($dir eq 'plpgsql/src') {
+ if ($dir eq 'plpgsql/src')
+ {
$lang = 'plpgsql';
}
- elsif ($dir eq 'tcl') {
+ elsif ($dir eq 'tcl')
+ {
$lang = 'pltcl';
}
- else {
+ else
+ {
$lang = $dir;
}
if ($lang eq 'plpython')
chomp;
push @hfiles, $_
unless m|^src/include/port/|
- || m|^src/backend/port/\w+/|;
+ || m|^src/backend/port/\w+/|;
}
close $pipe or die "$FIND failed: $!";
# easier to configure. Note that the typedefs need trailing newlines.
my @whitelist = ("bool\n");
-my %blacklist = map { +"$_\n" => 1 }
- qw( FD_SET date interval timestamp ANY
- abs allocfunc iterator other pointer printfunc reference string type );
+my %blacklist = map { +"$_\n" => 1 } qw( FD_SET date interval timestamp ANY
+ abs allocfunc iterator other pointer printfunc reference string type );
# globals
my @files;
push(@typedefs, @whitelist);
# remove blacklisted entries
- @typedefs = grep { ! $blacklist{$_} } @typedefs;
+ @typedefs = grep { !$blacklist{$_} } @typedefs;
# write filtered typedefs
my $filter_typedefs_fh = new File::Temp(TEMPLATE => "pgtypedefXXXXX");
&& -f _
&& /^.*\.[ch]\z/s
&& push(@files, $File::Find::name);
- }
+ }
},
$code_base) if $code_base;